Example #1
    def ensure_manifest(host, path=None):
        """Updates the MANIFEST.json file, or generates if it does not exist.

        Args:
            host: The Host object providing filesystem access.
            path: The path to a WPT root (relative to web_tests, optional).
        """
        if path is None:
            path = host.filesystem.join('external', 'wpt')
        finder = PathFinder(host.filesystem)
        wpt_path = finder.path_from_web_tests(path)
        manifest_path = host.filesystem.join(wpt_path, MANIFEST_NAME)

        # TODO(crbug.com/853815): perhaps also cache the manifest for wpt_internal.
        if 'external' in path:
            base_manifest_path = finder.path_from_web_tests(
                'external', BASE_MANIFEST_NAME)
            if not host.filesystem.exists(base_manifest_path):
                _log.error('Manifest base not found at "%s".',
                           base_manifest_path)
                host.filesystem.write_text_file(base_manifest_path, '{}')

            # Unconditionally replace MANIFEST.json with the base manifest even if
            # the former exists, to avoid regenerating the manifest from scratch
            # when the manifest version changes. Remove the destination first as
            # copyfile will fail if the two files are hardlinked or symlinked.
            if host.filesystem.exists(manifest_path):
                host.filesystem.remove(manifest_path)
            host.filesystem.copyfile(base_manifest_path, manifest_path)

        WPTManifest.generate_manifest(host, wpt_path)

        _log.debug('Manifest generation completed.')
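
The remove-then-copy step in the comment above is easy to reproduce with the standard library alone. A minimal sketch (POSIX-style hardlinks; the temporary paths are illustrative): shutil.copyfile refuses to copy onto a destination that is the same underlying file as the source, which is exactly what a hardlinked MANIFEST.json would be.

    import os
    import shutil
    import tempfile

    # Create a source file and a destination hardlinked to it.
    tmp = tempfile.mkdtemp()
    src = os.path.join(tmp, 'base_manifest.json')
    dst = os.path.join(tmp, 'MANIFEST.json')
    with open(src, 'w') as f:
        f.write('{}')
    os.link(src, dst)

    try:
        shutil.copyfile(src, dst)
    except (shutil.Error, OSError) as error:
        # Python 3 raises shutil.SameFileError; Python 2 raises shutil.Error.
        print('copyfile failed: %s' % error)

    # Removing the destination first breaks the hardlink, so the copy succeeds.
    os.remove(dst)
    shutil.copyfile(src, dst)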
Example #2
    def test_update(self):
        host = MockHost()
        filesystem = host.filesystem
        finder = PathFinder(filesystem)

        flag_expectations_file = finder.path_from_web_tests(
            'FlagExpectations', 'foo')
        filesystem.write_text_file(
            flag_expectations_file,
            'something/pass-unexpectedly-mac.html [ Fail ]')

        self._setup_mock_results(host.buildbot)
        cmd = ['update', '--flag=--foo']
        TryFlag(cmd, host, MockGitCL(host, self.mock_try_results)).run()

        def results_url(build):
            return '%s/%s/%s/%s/layout-test-results/results.html' % (
                'https://test-results.appspot.com/data/layout_results',
                build.builder_name, build.build_number,
                'webkit_layout_tests%20%28with%20patch%29')

        self.assertEqual(
            host.stdout.getvalue(), '\n'.join([
                'Fetching results...',
                '-- Linux: %s' % results_url(self.linux_build),
                '-- Mac: %s' % results_url(self.mac_build),
                '-- Win: %s' % results_url(self.win_build), '',
                '### 1 unexpected passes:', '',
                'Bug(none) [ Mac ] something/pass-unexpectedly-mac.html [ Pass ]',
                '', '### 2 unexpected failures:', '',
                'Bug(none) something/fail-everywhere.html [ Failure ]',
                'Bug(none) [ Linux Win ] something/fail-win-and-linux.html [ Failure ]',
                ''
            ]))
Example #3
    def _add_base_manifest_to_mock_filesystem(self, filesystem):
        path_finder = PathFinder(filesystem)

        external_dir = path_finder.path_from_web_tests('external')
        filesystem.maybe_make_directory(filesystem.join(external_dir, 'wpt'))

        manifest_base_path = filesystem.join(external_dir, BASE_MANIFEST_NAME)
        filesystem.files[manifest_base_path] = '{"manifest": "base"}'
Example #4
    def _add_base_manifest_to_mock_filesystem(self, filesystem):
        path_finder = PathFinder(filesystem)

        external_dir = path_finder.path_from_web_tests('external')
        filesystem.maybe_make_directory(filesystem.join(external_dir, 'wpt'))

        # This filename should match the constant BASE_MANIFEST_NAME.
        manifest_base_path = filesystem.join(external_dir,
                                             'WPT_BASE_MANIFEST_5.json')
        filesystem.files[manifest_base_path] = '{"manifest": "base"}'
Example #5
    def test_update_irrelevant_unexpected_pass(self):
        host = MockHost()
        filesystem = host.filesystem
        finder = PathFinder(filesystem)
        flag_expectations_file = finder.path_from_web_tests(
            'FlagExpectations', 'foo')
        self._setup_mock_results(host.buildbot)
        cmd = ['update', '--flag=--foo']

        # Unexpected passes that don't have flag-specific failure expectations
        # should not be reported.
        filesystem.write_text_file(flag_expectations_file, '')
        TryFlag(cmd, host, MockGitCL(host, self.mock_try_results)).run()
        self.assertTrue('### 0 unexpected passes' in host.stdout.getvalue())
Example #6
    def _run_trigger_test(self, regenerate):
        host = MockHost()
        git = host.git()
        git_cl = MockGitCL(host)
        finder = PathFinder(host.filesystem)

        flag_file = finder.path_from_web_tests(
            'additional-driver-flag.setting')
        flag_expectations_file = finder.path_from_web_tests(
            'FlagExpectations', 'foo')

        cmd = ['trigger', '--flag=--foo']
        if regenerate:
            cmd.append('--regenerate')
        TryFlag(cmd, host, git_cl).run()

        expected_added_paths = {flag_file}
        expected_commits = [[
            'Flag try job: force --foo for run_web_tests.py.'
        ]]

        if regenerate:
            expected_added_paths.add(flag_expectations_file)
            expected_commits.append(
                ['Flag try job: clear expectations for --foo.'])

        self.assertEqual(git.added_paths, expected_added_paths)
        self.assertEqual(git.local_commits(), expected_commits)

        self.assertEqual(git_cl.calls, [[
            'git', 'cl', 'upload', '--bypass-hooks', '-f', '-m',
            'Flag try job for --foo.'
        ], [
            'git', 'cl', 'try', '-B', 'luci.chromium.try', '-b', 'linux-rel'
        ], [
            'git', 'cl', 'try', '-B', 'luci.chromium.try', '-b', 'mac-rel'
        ], ['git', 'cl', 'try', '-B', 'luci.chromium.try', '-b', 'win7-rel']])
Example #7
    def ensure_manifest(host):
        """Updates the MANIFEST.json file, or generates if it does not exist."""
        finder = PathFinder(host.filesystem)
        manifest_path = finder.path_from_web_tests('external', 'wpt',
                                                   'MANIFEST.json')
        base_manifest_path = finder.path_from_web_tests(
            'external', BASE_MANIFEST_NAME)

        if not host.filesystem.exists(base_manifest_path):
            _log.error('Manifest base not found at "%s".', base_manifest_path)
            host.filesystem.write_text_file(base_manifest_path, '{}')

        # Unconditionally replace MANIFEST.json with the base manifest even if
        # the former exists, to avoid regenerating the manifest from scratch
        # when the manifest version changes. Remove the destination first as
        # copyfile will fail if the two files are hardlinked or symlinked.
        if host.filesystem.exists(manifest_path):
            host.filesystem.remove(manifest_path)
        host.filesystem.copyfile(base_manifest_path, manifest_path)

        wpt_path = finder.path_from_web_tests('external', 'wpt')
        WPTManifest.generate_manifest(host, wpt_path)

        _log.debug('Manifest generation completed.')
Example #8
class DirectoryOwnersExtractor(object):
    def __init__(self, host):
        self.filesystem = host.filesystem
        self.finder = PathFinder(self.filesystem)
        self.executive = host.executive
        self.owner_map = None

    def list_owners(self, changed_files):
        """Looks up the owners for the given set of changed files.

        Args:
            changed_files: A list of file paths relative to the repository root.

        Returns:
            A dict mapping tuples of owner email addresses to lists of
            owned directories (paths relative to the root of web tests).
        """
        email_map = collections.defaultdict(set)
        external_root_owners = self.finder.path_from_web_tests(
            'external', 'OWNERS')
        for relpath in changed_files:
            # Try to find the first *non-empty* OWNERS file.
            absolute_path = self.finder.path_from_chromium_base(relpath)
            owners = None
            owners_file = self.find_owners_file(absolute_path)
            while owners_file:
                owners = self.extract_owners(owners_file)
                if owners:
                    break
                # Found an empty OWNERS file. Try again from the parent directory.
                absolute_path = self.filesystem.dirname(
                    self.filesystem.dirname(owners_file))
                owners_file = self.find_owners_file(absolute_path)
            # Skip web_tests/external/OWNERS.
            if not owners or owners_file == external_root_owners:
                continue

            owned_directory = self.filesystem.dirname(owners_file)
            owned_directory_relpath = self.filesystem.relpath(
                owned_directory, self.finder.web_tests_dir())
            email_map[tuple(owners)].add(owned_directory_relpath)
        return {
            owners: sorted(owned_directories)
            for owners, owned_directories in email_map.iteritems()
        }

    def find_owners_file(self, start_path):
        """Finds the first enclosing OWNERS file for a given path.

        Starting from the given path, walks up the directory tree until the
        first OWNERS file is found or web_tests/external is reached.

        Args:
            start_path: A relative path from the root of the repository, or an
                absolute path. The path can be a file or a directory.

        Returns:
            The absolute path to the first OWNERS file found; None if not found
            or if start_path is outside of web_tests/external.
        """
        abs_start_path = (start_path if self.filesystem.isabs(start_path) else
                          self.finder.path_from_chromium_base(start_path))
        directory = (abs_start_path if self.filesystem.isdir(abs_start_path)
                     else self.filesystem.dirname(abs_start_path))
        external_root = self.finder.path_from_web_tests('external')
        if not directory.startswith(external_root):
            return None
        # Stop at web_tests, which is the parent of external_root.
        while directory != self.finder.web_tests_dir():
            owners_file = self.filesystem.join(directory, 'OWNERS')
            if self.filesystem.isfile(
                    self.finder.path_from_chromium_base(owners_file)):
                return owners_file
            directory = self.filesystem.dirname(directory)
        return None
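
The walk-up search above depends only on path operations, so a standalone version against a real filesystem needs nothing beyond os.path. In this sketch, external_root and web_tests_dir are plain arguments rather than values obtained from PathFinder.

    import os.path

    def find_owners_file(start_path, external_root, web_tests_dir):
        # Walk up from start_path until an OWNERS file is found or
        # web_tests (the parent of external_root) is reached.
        directory = (start_path if os.path.isdir(start_path)
                     else os.path.dirname(start_path))
        if not directory.startswith(external_root):
            return None
        while directory != web_tests_dir:
            owners_file = os.path.join(directory, 'OWNERS')
            if os.path.isfile(owners_file):
                return owners_file
            directory = os.path.dirname(directory)
        return None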

    def extract_owners(self, owners_file):
        """Extracts owners from an OWNERS file.

        Args:
            owners_file: An absolute path to an OWNERS file.

        Returns:
            A list of valid owners (email addresses).
        """
        contents = self._read_text_file(owners_file)
        email_regexp = re.compile(BASIC_EMAIL_REGEXP)
        addresses = []
        for line in contents.splitlines():
            line = line.strip()
            if email_regexp.match(line):
                addresses.append(line)
        return addresses
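
BASIC_EMAIL_REGEXP is defined elsewhere in blinkpy; a self-contained equivalent of extract_owners, using a deliberately simple stand-in pattern for illustration:

    import re

    SIMPLE_EMAIL_REGEXP = r'^[\w.+-]+@[\w.-]+\.\w+$'  # stand-in pattern

    def extract_owners_from_text(contents):
        email_regexp = re.compile(SIMPLE_EMAIL_REGEXP)
        return [line.strip() for line in contents.splitlines()
                if email_regexp.match(line.strip())]

    print(extract_owners_from_text(
        '# COMPONENT: Blink>HTML\n'
        'alice@chromium.org\n'
        '# a comment\n'
        'bob@chromium.org\n'))
    # -> ['alice@chromium.org', 'bob@chromium.org']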

    def extract_component(self, owners_file):
        """Extracts the component from an OWNERS file.

        Args:
            owners_file: An absolute path to an OWNERS file.

        Returns:
            A string, or None if not found.
        """
        dir_metadata = self._read_dir_metadata(owners_file)
        if dir_metadata and dir_metadata.component:
            return dir_metadata.component

        contents = self._read_text_file(owners_file)
        search = re.search(COMPONENT_REGEXP, contents, re.MULTILINE)
        if search:
            return search.group(1)
        return None

    def is_wpt_notify_enabled(self, owners_file):
        """Checks if the OWNERS file enables WPT-NOTIFY.

        Args:
            owners_file: An absolute path to an OWNERS file.

        Returns:
            A boolean.
        """
        dir_metadata = self._read_dir_metadata(owners_file)
        if dir_metadata and dir_metadata.should_notify is not None:
            return dir_metadata.should_notify

        contents = self._read_text_file(owners_file)
        return bool(re.search(WPT_NOTIFY_REGEXP, contents, re.MULTILINE))

    @memoized
    def _read_text_file(self, path):
        return self.filesystem.read_text_file(path)

    @memoized
    def _read_dir_metadata(self, path):
        """Read the content from a path.

        Args:
            path: An absolute path.

        Returns:
            A WPTDirMetadata object, or None if not found.
        """
        root_path = self.finder.web_tests_dir()
        dir_path = self.filesystem.dirname(path)

        # dirmd starts from the absolute directory path `dir_path`, traverses
        # its parent directories, and stops at `root_path` (the web_tests
        # directory) to find the first available DIR_METADATA file.
        json_data = self.executive.run_command([
            self.finder.path_from_depot_tools_base('dirmd'), 'compute',
            '-root', root_path, dir_path
        ])
        try:
            data = json.loads(json_data)
        except ValueError:
            return None

        relative_path = self.filesystem.relpath(dir_path, root_path)
        return WPTDirMetadata(data, relative_path)
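
For reference, a stripped-down version of the dirmd invocation above using subprocess directly instead of the blinkpy executive. The dirmd binary path is a placeholder, and the shape of the parsed JSON is whatever `dirmd compute` emits:

    import json
    import subprocess

    def compute_dir_metadata(dirmd_binary, root_path, dir_path):
        # Mirrors the run_command call in _read_dir_metadata.
        output = subprocess.check_output(
            [dirmd_binary, 'compute', '-root', root_path, dir_path])
        try:
            return json.loads(output)
        except ValueError:  # invalid or empty output
            return None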
Example #9
class Manager(object):
    """A class for managing running a series of web tests."""

    HTTP_SUBDIR = 'http'
    PERF_SUBDIR = 'perf'
    WEBSOCKET_SUBDIR = 'websocket'
    ARCHIVED_RESULTS_LIMIT = 25

    def __init__(self, port, options, printer):
        """Initializes test runner data structures.

        Args:
            port: An object implementing platform-specific functionality.
            options: An options argument which contains command line options.
            printer: A Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer

        self._expectations = None
        self._http_server_started = False
        self._wptserve_started = False
        self._websockets_server_started = False

        self._results_directory = self._port.results_directory()
        self._artifacts_directory = self._port.artifacts_directory()
        self._finder = WebTestFinder(self._port, self._options)
        self._path_finder = PathFinder(port.host.filesystem)

        self._sink = CreateTestResultSink(self._port)
        self._runner = WebTestRunner(self._options, self._port, self._printer,
                                     self._results_directory,
                                     self._test_is_slow, self._sink)

    def run(self, args):
        """Runs the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update('Collecting tests ...')
        running_all_tests = False

        try:
            paths, all_test_names, running_all_tests = self._collect_tests(
                args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(
                exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

        test_names = self._finder.split_into_chunks(all_test_names)
        if self._options.order == 'natural':
            test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            test_names.sort()
            random.Random(self._options.seed).shuffle(test_names)
        elif self._options.order == 'none':
            # Restore the test order to user specified order.
            # base.tests() may change the order as it returns tests in the
            # real, external/wpt, virtual order.
            if paths:
                test_names = self._restore_order(paths, test_names)

        if not self._options.no_expectations:
            self._printer.write_update('Parsing expectations ...')
            self._expectations = test_expectations.TestExpectations(self._port)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)

        self._printer.print_found(len(all_test_names), len(test_names),
                                  len(tests_to_run), self._options.repeat_each,
                                  self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            msg = 'No tests to run.'
            if self._options.zero_tests_executed_ok:
                _log.info(msg)
                # Keep executing to produce valid (but empty) results.
            else:
                _log.critical(msg)
                code = exit_codes.NO_TESTS_EXIT_STATUS
                return test_run_results.RunDetails(exit_code=code)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        if self._options.num_retries is None:
            # If --test-list is passed, or if no test narrowing is specified,
            # default to 3 retries. Otherwise [e.g. if tests are being passed by
            # name], default to 0 retries.
            if self._options.test_list or len(paths) < len(test_names):
                self._options.num_retries = 3
            else:
                self._options.num_retries = 0

        should_retry_failures = self._options.num_retries > 0

        try:
            self._start_servers(tests_to_run)
            if self._options.watch:
                run_results = self._run_test_loop(tests_to_run, tests_to_skip)
            else:
                run_results = self._run_test_once(tests_to_run, tests_to_skip,
                                                  should_retry_failures)
            initial_results, all_retry_results = run_results
        finally:
            self._stop_servers()
            self._clean_up_run()

        if self._options.no_expectations:
            return test_run_results.RunDetails(0, [], [], initial_results,
                                               all_retry_results)

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update('Looking for new crash logs ...')
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        self._printer.write_update('Summarizing results ...')
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results)
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._expectations,
            initial_results,
            all_retry_results,
            only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
            exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                                   summarized_failing_results, initial_results,
                                   running_all_tests)

            self._upload_json_files()

            self._copy_results_html_file(self._artifacts_directory,
                                         'results.html')
            if initial_results.keyboard_interrupted:
                exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = exit_codes.EARLY_EXIT_STATUS
                if (self._options.show_results
                        and (exit_code or initial_results.total_failures)):
                    self._port.show_results_html_file(
                        self._filesystem.join(self._artifacts_directory,
                                              'results.html'))
                self._printer.print_results(time.time() - start_time,
                                            initial_results)

        return test_run_results.RunDetails(exit_code, summarized_full_results,
                                           summarized_failing_results,
                                           initial_results, all_retry_results)

    def _run_test_loop(self, tests_to_run, tests_to_skip):
        # Don't show results in a new browser window because we're already
        # printing the link to diffs in the loop
        self._options.show_results = False

        while True:
            initial_results, all_retry_results = self._run_test_once(
                tests_to_run, tests_to_skip, should_retry_failures=False)
            for name in initial_results.failures_by_name:
                failure = initial_results.failures_by_name[name][0]
                if isinstance(failure, test_failures.FailureTextMismatch):
                    full_test_path = self._filesystem.join(
                        self._artifacts_directory, name)
                    filename, _ = self._filesystem.splitext(full_test_path)
                    pretty_diff_path = 'file://' + filename + '-pretty-diff.html'
                    self._printer.writeln('Link to pretty diff:')
                    self._printer.writeln(pretty_diff_path + '\n')
            self._printer.writeln('Finished running tests')

            user_input = self._port.host.user.prompt(
                'Interactive watch mode: (q)uit (r)etry\n').lower()

            if user_input == 'q' or user_input == 'quit':
                return (initial_results, all_retry_results)

    def _run_test_once(self, tests_to_run, tests_to_skip,
                       should_retry_failures):
        num_workers = self._port.num_workers(int(
            self._options.child_processes))

        initial_results = self._run_tests(tests_to_run, tests_to_skip,
                                          self._options.repeat_each,
                                          self._options.iterations,
                                          num_workers)

        # Don't retry failures when interrupted by user or failures limit exception.
        should_retry_failures = should_retry_failures and not (
            initial_results.interrupted
            or initial_results.keyboard_interrupted)

        tests_to_retry = self._tests_to_retry(initial_results)
        all_retry_results = []
        if should_retry_failures and tests_to_retry:
            for retry_attempt in xrange(1, self._options.num_retries + 1):
                if not tests_to_retry:
                    break

                _log.info('')
                _log.info(
                    'Retrying %s, attempt %d of %d...',
                    grammar.pluralize('unexpected failure',
                                      len(tests_to_retry)), retry_attempt,
                    self._options.num_retries)

                retry_results = self._run_tests(tests_to_retry,
                                                tests_to_skip=set(),
                                                repeat_each=1,
                                                iterations=1,
                                                num_workers=num_workers,
                                                retry_attempt=retry_attempt)
                all_retry_results.append(retry_results)

                tests_to_retry = self._tests_to_retry(retry_results)
        return (initial_results, all_retry_results)

    def _restore_order(self, paths, test_names):
        original_test_names = list(test_names)
        test_names = []
        for path in paths:
            for test in original_test_names:
                if test.startswith(path) or fnmatch.fnmatch(test, path):
                    test_names.append(test)
        test_names += list(set(original_test_names) - set(test_names))
        return test_names
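
_restore_order uses no instance state, so its behavior is easy to check in isolation: tests matching an explicitly given path (by prefix or glob) come first, in path order, and any remaining tests are appended at the end. A small worked example:

    import fnmatch

    def restore_order(paths, test_names):
        original_test_names = list(test_names)
        test_names = []
        for path in paths:
            for test in original_test_names:
                if test.startswith(path) or fnmatch.fnmatch(test, path):
                    test_names.append(test)
        test_names += list(set(original_test_names) - set(test_names))
        return test_names

    print(restore_order(
        ['fast/css', 'http/*'],
        ['http/tests/a.html', 'fast/css/b.html', 'misc/c.html']))
    # -> ['fast/css/b.html', 'http/tests/a.html', 'misc/c.html']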

    def _collect_tests(self, args):
        return self._finder.find_tests(
            args,
            test_list=self._options.test_list,
            fastest_percentile=self._options.fastest,
            filters=self._options.isolated_script_test_filter)

    def _is_http_test(self, test):
        return (test.startswith(self.HTTP_SUBDIR +
                                self._port.TEST_PATH_SEPARATOR)
                or self._is_websocket_test(test)
                or self._port.TEST_PATH_SEPARATOR + self.HTTP_SUBDIR +
                self._port.TEST_PATH_SEPARATOR in test)

    def _is_websocket_test(self, test):
        if self._port.should_use_wptserve(test):
            return False

        return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _is_perf_test(self, test):
        return (self.PERF_SUBDIR == test
                or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test)

    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names,
                                                self._expectations)
        tests_to_run = [
            test for test in test_names if test not in tests_to_skip
        ]

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file, retry_attempt):
        return TestInput(
            test_file,
            self._options.slow_time_out_ms
            if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._test_requires_lock(test_file),
            retry_attempt=retry_attempt)

    def _test_requires_lock(self, test_file):
        """Returns True if the test needs to be locked when running multiple
        instances of this test runner.

        Perf tests are locked because heavy load caused by running other
        tests in parallel might cause some of them to time out.
        """
        return self._is_http_test(test_file) or self._is_perf_test(test_file)

    def _test_is_slow(self, test_file):
        if not self._expectations:
            return False
        is_slow_test = self._expectations.get_expectations(
            test_file).is_slow_test
        return is_slow_test or self._port.is_slow_wpt_test(test_file)

    def _needs_servers(self, test_names):
        return any(
            self._test_requires_lock(test_name) for test_name in test_names)

    def _rename_results_folder(self):
        try:
            timestamp = time.strftime(
                "%Y-%m-%d-%H-%M-%S",
                time.localtime(
                    self._filesystem.mtime(
                        self._filesystem.join(self._artifacts_directory,
                                              'results.html'))))
        except (IOError, OSError) as error:
            # results.html may not have been generated in the previous run if
            # the run was interrupted before testing even started. In that
            # case, don't archive the folder; simply overwrite the current
            # folder contents with new results.
            import errno
            if error.errno in (errno.EEXIST, errno.ENOENT):
                self._printer.write_update(
                    'No results.html file found in previous run, skipping it.')
            return None
        archived_name = ''.join(
            (self._filesystem.basename(self._artifacts_directory), '_',
             timestamp))
        archived_path = self._filesystem.join(
            self._filesystem.dirname(self._artifacts_directory), archived_name)
        self._filesystem.move(self._artifacts_directory, archived_path)

    def _delete_dirs(self, dir_list):
        for dir_path in dir_list:
            self._filesystem.rmtree(dir_path)

    def _limit_archived_results_count(self):
        results_directory_path = self._filesystem.dirname(
            self._artifacts_directory)
        file_list = self._filesystem.listdir(results_directory_path)
        results_directories = []
        for name in file_list:
            file_path = self._filesystem.join(results_directory_path, name)
            if (self._filesystem.isdir(file_path)
                    and self._artifacts_directory in file_path):
                results_directories.append(file_path)
        results_directories.sort(key=self._filesystem.mtime)
        self._printer.write_update('Clobbering excess archived results in %s' %
                                   results_directory_path)
        self._delete_dirs(results_directories[:-self.ARCHIVED_RESULTS_LIMIT])
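
The negative slice is what enforces the limit: after sorting oldest-first by mtime, everything except the newest ARCHIVED_RESULTS_LIMIT entries is deleted. A two-line illustration:

    ARCHIVED_RESULTS_LIMIT = 3
    archives = ['run1', 'run2', 'run3', 'run4', 'run5']  # oldest to newest
    print(archives[:-ARCHIVED_RESULTS_LIMIT])  # -> ['run1', 'run2'] (deleted)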

    def _set_up_run(self, test_names):
        self._printer.write_update('Checking build ...')
        if self._options.build:
            exit_code = self._port.check_build(self._needs_servers(test_names),
                                               self._printer)
            if exit_code:
                _log.error('Build check failed')
                return exit_code

        if self._options.clobber_old_results:
            self._clobber_old_results()
        elif self._filesystem.exists(self._artifacts_directory):
            self._limit_archived_results_count()
            # Rename the existing results folder for archiving.
            self._rename_results_folder()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(
            self._artifacts_directory)

        exit_code = self._port.setup_test_run()
        if exit_code:
            _log.error('Build setup failed')
            return exit_code

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update('Checking system dependencies ...')
            exit_code = self._port.check_sys_deps()
            if exit_code:
                return exit_code

        return exit_codes.OK_EXIT_STATUS

    def _run_tests(self,
                   tests_to_run,
                   tests_to_skip,
                   repeat_each,
                   iterations,
                   num_workers,
                   retry_attempt=0):

        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(
                        self._test_input_for_file(test, retry_attempt))
        return self._runner.run_tests(self._expectations, test_inputs,
                                      tests_to_skip, num_workers,
                                      retry_attempt)

    def _start_servers(self, tests_to_run):
        if any(self._port.is_wpt_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WPTServe ...')
            self._port.start_wptserve()
            self._wptserve_started = True

        if (self._port.requires_http_server()
                or any(self._is_http_test(test) for test in tests_to_run)):
            self._printer.write_update('Starting HTTP server ...')
            self._port.start_http_server(
                additional_dirs={},
                number_of_drivers=self._options.max_locked_shards)
            self._http_server_started = True

        if any(self._is_websocket_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WebSocket server ...')
            self._port.start_websocket_server()
            self._websockets_server_started = True

    def _stop_servers(self):
        if self._wptserve_started:
            self._printer.write_update('Stopping WPTServe ...')
            self._wptserve_started = False
            self._port.stop_wptserve()
        if self._http_server_started:
            self._printer.write_update('Stopping HTTP server ...')
            self._http_server_started = False
            self._port.stop_http_server()
        if self._websockets_server_started:
            self._printer.write_update('Stopping WebSocket server ...')
            self._websockets_server_started = False
            self._port.stop_websocket_server()

    def _clean_up_run(self):
        _log.debug('Flushing stdout')
        sys.stdout.flush()
        _log.debug('Flushing stderr')
        sys.stderr.flush()
        _log.debug('Cleaning up port')
        self._port.clean_up_test_run()
        if self._sink:
            _log.debug('Closing sink')
            self._sink.close()

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Looks for and writes new crash logs, at the end of the test run.

        Since crash logs can take a long time to be written out if the system is
        under stress, do a second pass at the end of the test run.

        Args:
            run_results: The results of the test run.
            start_time: Time the tests started at. We're looking for crash
                logs after that time.
        """
        crashed_processes = []
        test_to_crash_failure = {}

        # reset static variables for Failure type classes
        test_failures.AbstractTestResultType.port = self._port
        test_failures.AbstractTestResultType.result_directory = self._results_directory
        test_failures.AbstractTestResultType.filesystem = self._filesystem

        for test, result in run_results.unexpected_results_by_name.iteritems():
            if result.type != ResultType.Crash:
                continue
            for failure in result.failures:
                if (not isinstance(failure, test_failures.FailureCrash)
                        or failure.has_log):
                    continue
                crashed_processes.append(
                    [test, failure.process_name, failure.pid])
                test_to_crash_failure[test] = failure

        sample_files = self._port.look_for_new_samples(crashed_processes,
                                                       start_time) or {}
        for test, sample_file in sample_files.iteritems():
            test_failures.AbstractTestResultType.test_name = test
            test_result = run_results.unexpected_results_by_name[test]
            artifact_relative_path = self._port.output_filename(
                test, test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')
            artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()
            artifact_abspath = self._filesystem.join(self._results_directory,
                                                     artifacts_sub_dir,
                                                     artifact_relative_path)
            self._filesystem.maybe_make_directory(
                self._filesystem.dirname(artifact_abspath))
            self._filesystem.copyfile(sample_file, artifact_abspath)
            test_result.artifacts.AddArtifact(
                'sample_file',
                self._filesystem.join(artifacts_sub_dir,
                                      artifact_relative_path))

        new_crash_logs = self._port.look_for_new_crash_logs(
            crashed_processes, start_time) or {}
        for test, (crash_log, crash_site) in new_crash_logs.iteritems():
            test_failures.AbstractTestResultType.test_name = test
            failure = test_to_crash_failure[test]
            failure.crash_log = crash_log
            failure.has_log = self._port.output_contains_sanitizer_messages(
                failure.crash_log)
            test_result = run_results.unexpected_results_by_name[test]
            test_result.crash_site = crash_site
            test_to_crash_failure[test].create_artifacts(test_result.artifacts,
                                                         force_overwrite=True)

    def _clobber_old_results(self):
        dir_above_results_path = self._filesystem.dirname(
            self._artifacts_directory)
        self._printer.write_update('Clobbering old results in %s.' %
                                   dir_above_results_path)
        if not self._filesystem.exists(dir_above_results_path):
            return
        file_list = self._filesystem.listdir(dir_above_results_path)
        results_directories = []
        for name in file_list:
            file_path = self._filesystem.join(dir_above_results_path, name)
            if (self._filesystem.isdir(file_path)
                    and self._artifacts_directory in file_path):
                results_directories.append(file_path)
        self._delete_dirs(results_directories)

        # Port specific clean-up.
        self._port.clobber_old_port_specific_results()

    def _tests_to_retry(self, run_results):
        # TODO(ojan): This should also check that result.type != test_expectations.MISSING
        # since retrying missing expectations is silly. But that's a bit tricky since we
        # only consider the last retry attempt for the count of unexpected regressions.
        return [
            result.test_name
            for result in run_results.unexpected_results_by_name.values()
            if result.type != ResultType.Pass
        ]

    def _write_json_files(self, summarized_full_results,
                          summarized_failing_results, initial_results,
                          running_all_tests):
        _log.debug("Writing JSON files in %s.", self._artifacts_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(
            initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._artifacts_directory,
                                                'times_ms.json')
        json_results_generator.write_json(self._filesystem, times_trie,
                                          times_json_path)

        # Save out the times data so we can use it for --fastest in the future.
        if running_all_tests:
            bot_test_times_path = self._port.bot_test_times_path()
            self._filesystem.maybe_make_directory(
                self._filesystem.dirname(bot_test_times_path))
            json_results_generator.write_json(self._filesystem, times_trie,
                                              bot_test_times_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._artifacts_directory,
                                           'stats.json')
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._artifacts_directory,
                                                  'full_results.json')
        json_results_generator.write_json(self._filesystem,
                                          summarized_full_results,
                                          full_results_path)

        full_results_jsonp_path = self._filesystem.join(
            self._artifacts_directory, 'full_results_jsonp.js')
        json_results_generator.write_json(self._filesystem,
                                          summarized_full_results,
                                          full_results_jsonp_path,
                                          callback='ADD_FULL_RESULTS')
        full_results_path = self._filesystem.join(self._artifacts_directory,
                                                  'failing_results.json')
        # We write failing_results.json out as jsonp because we need to load it
        # from a file url for results.html and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem,
                                          summarized_failing_results,
                                          full_results_path,
                                          callback='ADD_RESULTS')

        # Write out the JSON files suitable for other tools to process.
        # As the output can be quite large (as there are 60k+ tests) we also
        # support only outputting the failing results.
        if self._options.json_failing_test_results:
            # FIXME(tansell): Make sure this includes *unexpected* results
            # (i.e. passing when expected to fail).
            json_results_generator.write_json(
                self._filesystem, summarized_failing_results,
                self._options.json_failing_test_results)
        if self._options.json_test_results:
            json_results_generator.write_json(self._filesystem,
                                              summarized_full_results,
                                              self._options.json_test_results)

        _log.debug('Finished writing JSON files.')

    def _upload_json_files(self):
        if not self._options.test_results_server:
            return

        if not self._options.master_name:
            _log.error(
                '--test-results-server was set, but --master-name was not. Not uploading JSON files.'
            )
            return

        _log.debug('Uploading JSON files for builder: %s',
                   self._options.builder_name)
        attrs = [('builder', self._options.builder_name),
                 ('testtype', self._options.step_name),
                 ('master', self._options.master_name)]

        files = [
            (name, self._filesystem.join(self._artifacts_directory, name))
            for name in
            ['failing_results.json', 'full_results.json', 'times_ms.json']
        ]

        url = 'https://%s/testfile/upload' % self._options.test_results_server
        # Set uploading timeout in case appengine server is having problems.
        # 120 seconds are more than enough to upload test results.
        uploader = FileUploader(url, 120)
        try:
            response = uploader.upload_as_multipart_form_data(
                self._filesystem, files, attrs)
            if response:
                if response.code == 200:
                    _log.debug('JSON uploaded.')
                else:
                    _log.debug('JSON upload failed, %d: "%s"', response.code,
                               response.read())
            else:
                _log.error('JSON upload failed; no response returned')
        except IOError as err:
            _log.error('Upload failed: %s', err)

    def _copy_results_html_file(self, destination_dir, filename):
        """Copies a file from the template directory to the results directory."""
        template_dir = self._path_finder.path_from_web_tests('fast', 'harness')
        source_path = self._filesystem.join(template_dir, filename)
        destination_path = self._filesystem.join(destination_dir, filename)
        # Note that the results.html template file won't exist when
        # we're using a MockFileSystem during unit tests, so make sure
        # it exists before we try to copy it.
        if self._filesystem.exists(source_path):
            self._filesystem.copyfile(source_path, destination_path)

    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != ResultType.Skip:
                stats[result.test_name] = {
                    'results':
                    (_worker_number(result.worker_name), result.test_number,
                     result.pid, int(result.test_run_time * 1000),
                     int(result.total_run_time * 1000))
                }
        stats_trie = {}
        for name, value in stats.iteritems():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie
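
add_path_to_trie comes from json_results_generator; a minimal stand-in, assuming it nests results on '/' path separators, shows the shape of the stats trie:

    def add_path_to_trie(path, value, trie):
        # Store the value under nested dicts keyed by path components.
        if '/' not in path:
            trie[path] = value
            return
        head, rest = path.split('/', 1)
        trie.setdefault(head, {})
        add_path_to_trie(rest, value, trie[head])

    trie = {}
    add_path_to_trie('fast/css/a.html', {'results': (1, 2)}, trie)
    add_path_to_trie('fast/css/b.html', {'results': (3, 4)}, trie)
    print(trie)
    # -> {'fast': {'css': {'a.html': {'results': (1, 2)},
    #                      'b.html': {'results': (3, 4)}}}}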
Example #10
class TryFlag(object):
    def __init__(self, argv, host, git_cl):
        self._args = parse_args(argv)
        self._host = host
        self._git_cl = git_cl
        self._expectations = []
        self._filesystem = self._host.filesystem
        self._path_finder = PathFinder(self._filesystem)
        self._git = self._host.git()

    def _force_flag_for_test_runner(self):
        flag = self._args.flag
        path = self._path_finder.path_from_web_tests(FLAG_FILE)
        self._filesystem.write_text_file(path, flag + '\n')
        self._git.add_list([path])
        self._git.commit_locally_with_message(
            'Flag try job: force %s for run_web_tests.py.' % flag)

    def _flag_expectations_path(self):
        return self._path_finder.path_from_web_tests(
            'FlagExpectations', self._args.flag.lstrip('-'))

    def _clear_expectations(self):
        path = self._flag_expectations_path()
        self._filesystem.write_text_file(path, '')
        self._git.add_list([path])
        self._git.commit_locally_with_message(
            'Flag try job: clear expectations for %s.' % self._args.flag)

    def _tests_in_flag_expectations(self):
        path = self._flag_expectations_path()
        content = self._filesystem.read_text_file(path)
        test_expectations = TestExpectations()
        test_expectations.parse_tagged_list(content)
        return {
            test_name
            for test_name in test_expectations.individual_exps.keys()
        }

    def trigger(self):
        self._force_flag_for_test_runner()
        if self._args.regenerate:
            self._clear_expectations()
        self._git_cl.run([
            'upload', '--bypass-hooks', '-f', '-m',
            'Flag try job for %s.' % self._args.flag
        ])
        for builder in sorted(BUILDER_BUCKETS):
            bucket = BUILDER_BUCKETS[builder]
            self._git_cl.trigger_try_jobs([builder], bucket)

    def _create_expectation_line(self, result, test_configuration):
        expected_results = set(
            [res for res in result.actual_results().split()])
        tag = test_configuration.version
        reason = ''
        if self._args.bug:
            reason = 'crbug.com/' + self._args.bug
        return Expectation(test=result.test_name(),
                           results=expected_results,
                           tags=set([tag]),
                           reason=reason)

    def _process_result(self, build, result):
        if not result.did_run_as_expected():
            self._expectations.append(
                self._create_expectation_line(
                    result, BUILDER_CONFIGS[build.builder_name]))

    def update(self):
        self._host.print_('Fetching results...')
        # TODO: Get jobs from the _tryflag branch. Current branch for now.
        jobs = self._git_cl.latest_try_jobs(
            builder_names=BUILDER_CONFIGS.keys())
        results_fetcher = self._host.results_fetcher
        for build in sorted(jobs):
            self._host.print_('-- %s: %s/results.html' %
                              (BUILDER_CONFIGS[build.builder_name].version,
                               results_fetcher.results_url(
                                   build.builder_name, build.build_number)))
            results = results_fetcher.fetch_results(build, True)
            results.for_each_test(
                lambda result, b=build: self._process_result(b, result))

        # TODO: Write to flag expectations file. For now, stdout. :)
        unexpected_failures = []
        unexpected_passes = []
        tests_in_flag_expectations = self._tests_in_flag_expectations()
        for exp in self._expectations:
            if ResultType.Pass not in exp.results:
                unexpected_failures.append(exp)
            elif exp.test in tests_in_flag_expectations:
                unexpected_passes.append(exp)
        unexpected_passes = sorted(unexpected_passes, key=lambda e: e.test)
        unexpected_failures = sorted(unexpected_failures, key=lambda e: e.test)
        self._print_all(unexpected_passes, 'unexpected passes')
        self._print_all(unexpected_failures, 'unexpected failures')

    def _print_all(self, exps, description):
        self._host.print_('\n### %s %s:\n' % (len(exps), description))
        for exp in exps:
            self._host.print_(exp.to_string())

    def run(self):
        action = self._args.action
        if action == 'trigger':
            self.trigger()
        elif action == 'update':
            self.update()
        else:
            print >> self._host.stderr, 'specify "trigger" or "update"'
            return 1
        return 0
Example #11
class ImportNotifier(object):
    def __init__(self, host, chromium_git, local_wpt):
        self.host = host
        self.git = chromium_git
        self.local_wpt = local_wpt

        self._monorail_api = MonorailAPI
        self.default_port = host.port_factory.get()
        self.finder = PathFinder(host.filesystem)
        self.owners_extractor = DirectoryOwnersExtractor(host)
        self.new_failures_by_directory = defaultdict(list)
        self.components_for_product = {ANDROID_WEBLAYER: ["Internals>WebLayer"]}
        self.labels_for_product = {
            ANDROID_WEBLAYER: ["Project-WebLayer-WebPlatformSupport", "WL-WPT-Compat"]
        }

    def main(self,
             wpt_revision_start,
             wpt_revision_end,
             rebaselined_tests,
             test_expectations,
             new_override_expectations,
             issue,
             patchset,
             dry_run=True,
             service_account_key_json=None):
        """Files bug reports for new failures.

        Args:
            wpt_revision_start: The start of the imported WPT revision range
                (exclusive), i.e. the last imported revision.
            wpt_revision_end: The end of the imported WPT revision range
                (inclusive), i.e. the current imported revision.
            rebaselined_tests: A list of test names that have been rebaselined.
            test_expectations: A dictionary mapping names of tests that cannot
                be rebaselined to a list of new test expectation lines.
            new_override_expectations: New product-specific override
                expectations, keyed by product.
            issue: The issue number of the import CL (a string).
            patchset: The patchset number of the import CL (a string).
            dry_run: If True, no bugs will be actually filed to crbug.com.
            service_account_key_json: The path to a JSON private key of a
                service account for accessing Monorail. If None, try to get an
                access token from luci-auth.

        Note: "test names" are paths of the tests relative to web_tests.
        """
        gerrit_url = SHORT_GERRIT_PREFIX + issue
        gerrit_url_with_ps = gerrit_url + '/' + patchset + '/'

        changed_test_baselines = self.find_changed_baselines_of_tests(
            rebaselined_tests)
        self.examine_baseline_changes(changed_test_baselines,
                                      gerrit_url_with_ps)
        self.examine_new_test_expectations(test_expectations)

        bugs = self.create_bugs_from_new_failures(wpt_revision_start,
                                                  wpt_revision_end, gerrit_url)
        self.file_bugs(bugs, dry_run, service_account_key_json)

        for product, expectation_lines in new_override_expectations.items():
            bugs = self.create_bugs_for_product(wpt_revision_start,
                                                wpt_revision_end,
                                                gerrit_url,
                                                product,
                                                expectation_lines)
            self.file_bugs(bugs, dry_run, service_account_key_json)

    def find_changed_baselines_of_tests(self, rebaselined_tests):
        """Finds the corresponding changed baselines of each test.

        Args:
            rebaselined_tests: A list of test names that have been rebaselined.

        Returns:
            A dictionary mapping test names to paths of their baselines changed
            in this import CL (paths relative to the root of Chromium repo).
        """
        test_baselines = {}
        changed_files = self.git.changed_files()
        for test_name in rebaselined_tests:
            test_without_ext, _ = self.host.filesystem.splitext(test_name)
            changed_baselines = []
            # TODO(robertma): Refactor this into web_tests.port.base.
            baseline_name = test_without_ext + '-expected.txt'
            for changed_file in changed_files:
                if changed_file.endswith(baseline_name):
                    changed_baselines.append(changed_file)
            if changed_baselines:
                test_baselines[test_name] = changed_baselines
        return test_baselines
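
The baseline-matching rule above is a plain suffix check: a changed file counts as a baseline of a test when it ends with '<test name without extension>-expected.txt', which also picks up platform-specific copies. In isolation:

    import os.path

    def changed_baselines_for(test_name, changed_files):
        test_without_ext, _ = os.path.splitext(test_name)
        baseline_name = test_without_ext + '-expected.txt'
        return [f for f in changed_files if f.endswith(baseline_name)]

    print(changed_baselines_for(
        'external/wpt/foo/bar.html',
        ['third_party/blink/web_tests/external/wpt/foo/bar-expected.txt',
         'third_party/blink/web_tests/platform/mac/external/wpt/foo/bar-expected.txt',
         'unrelated.txt']))
    # -> both baseline paths; the platform-specific one is included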

    def examine_baseline_changes(self, changed_test_baselines,
                                 gerrit_url_with_ps):
        """Examines all changed baselines to find new failures.

        Args:
            changed_test_baselines: A dictionary mapping test names to paths of
                changed baselines.
            gerrit_url_with_ps: Gerrit URL of this CL with the patchset number.
        """
        for test_name, changed_baselines in changed_test_baselines.items():
            directory = self.find_owned_directory(test_name)
            if not directory:
                _log.warning('Cannot find OWNERS of %s', test_name)
                continue

            for baseline in changed_baselines:
                if self.more_failures_in_baseline(baseline):
                    self.new_failures_by_directory[directory].append(
                        TestFailure(
                            TestFailure.BASELINE_CHANGE,
                            test_name,
                            baseline_path=baseline,
                            gerrit_url_with_ps=gerrit_url_with_ps))

    def more_failures_in_baseline(self, baseline):
        """Determines if a testharness.js baseline file has new failures.

        The file is assumed to have been modified in the current git checkout,
        and so has a diff we can parse.

        We recognize two types of failures: FAIL lines, which are output for a
        specific subtest failing, and harness errors, which indicate an uncaught
        error in the test. Increasing numbers of either are considered new
        failures - this includes going from FAIL to error or vice-versa.
        """

        diff = self.git.run(['diff', '-U0', 'origin/main', '--', baseline])
        delta_failures = 0
        delta_harness_errors = 0
        for line in diff.splitlines():
            if line.startswith('+FAIL'):
                delta_failures += 1
            if line.startswith('-FAIL'):
                delta_failures -= 1
            if line.startswith('+Harness Error.'):
                delta_harness_errors += 1
            if line.startswith('-Harness Error.'):
                delta_harness_errors -= 1
        return delta_failures > 0 or delta_harness_errors > 0
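
The same counting logic, applied to a hand-written diff hunk for illustration: one FAIL line was added and none removed, so the baseline counts as having new failures.

    def has_more_failures(diff):
        delta_failures = 0
        delta_harness_errors = 0
        for line in diff.splitlines():
            if line.startswith('+FAIL'):
                delta_failures += 1
            if line.startswith('-FAIL'):
                delta_failures -= 1
            if line.startswith('+Harness Error.'):
                delta_harness_errors += 1
            if line.startswith('-Harness Error.'):
                delta_harness_errors -= 1
        return delta_failures > 0 or delta_harness_errors > 0

    sample_diff = '\n'.join([
        '--- a/external/wpt/foo-expected.txt',
        '+++ b/external/wpt/foo-expected.txt',
        '+FAIL subtest 2 assert_true failed',
        '-PASS subtest 2',
    ])
    print(has_more_failures(sample_diff))  # -> True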

    def examine_new_test_expectations(self, test_expectations):
        """Examines new test expectations to find new failures.

        Args:
            test_expectations: A dictionary mapping names of tests that cannot
                be rebaselined to a list of new test expectation lines.
        """
        for test_name, expectation_lines in test_expectations.items():
            directory = self.find_owned_directory(test_name)
            if not directory:
                _log.warning('Cannot find OWNERS of %s', test_name)
                continue

            for expectation_line in expectation_lines:
                self.new_failures_by_directory[directory].append(
                    TestFailure(
                        TestFailure.NEW_EXPECTATION,
                        test_name,
                        expectation_line=expectation_line))

    def create_bugs_for_product(self, wpt_revision_start, wpt_revision_end,
                                gerrit_url, product, expectation_lines):
        """Files bug reports for new failures per product

        Args:
            wpt_revision_start: The start of the imported WPT revision range
                (exclusive), i.e. the last imported revision.
            wpt_revision_end: The end of the imported WPT revision range
                (inclusive), i.e. the current imported revision.
            gerrit_url: Gerrit URL of the CL.
            product: The product to file bugs for.
            expectation_lines: The new override expectations for this product.

        Returns:
            A list of MonorailIssue objects that should be filed.
        """
        bugs = []
        summary = '[WPT] New failures introduced by import {}'.format(gerrit_url)

        prologue = ('WPT import {} introduced new failures:\n\n'
                    'List of new failures:\n'.format(gerrit_url))

        failure_list = ''
        for _, failure in expectation_lines.items():
            failure_list += str(failure) + '\n'

        expectations_statement = (
            '\nExpectations have been automatically added for '
            'the failing results to keep the bots green. Please '
            'investigate the new failures and triage as appropriate.\n')

        range_statement = '\nThis import contains upstream changes from {} to {}:\n'.format(
            wpt_revision_start, wpt_revision_end)

        description = (prologue + failure_list + expectations_statement +
                       range_statement)

        bug = MonorailIssue.new_chromium_issue(
            summary,
            description,
            cc=[],
            components=self.components_for_product[product],
            labels=self.labels_for_product[product])
        bugs.append(bug)
        return bugs

    def create_bugs_from_new_failures(self, wpt_revision_start,
                                      wpt_revision_end, gerrit_url):
        """Files bug reports for new failures.

        Args:
            wpt_revision_start: The start of the imported WPT revision range
                (exclusive), i.e. the last imported revision.
            wpt_revision_end: The end of the imported WPT revision range
                (inclusive), i.e. the current imported revision.
            gerrit_url: Gerrit URL of the CL.

        Return:
            A list of MonorailIssue objects that should be filed.
        """
        imported_commits = self.local_wpt.commits_in_range(
            wpt_revision_start, wpt_revision_end)
        bugs = []
        for directory, failures in self.new_failures_by_directory.items():
            summary = '[WPT] New failures introduced in {} by import {}'.format(
                directory, gerrit_url)

            full_directory = self.host.filesystem.join(
                self.finder.web_tests_dir(), directory)
            owners_file = self.host.filesystem.join(full_directory, 'OWNERS')
            metadata_file = self.host.filesystem.join(full_directory,
                                                      'DIR_METADATA')
            is_wpt_notify_enabled = False
            try:
                is_wpt_notify_enabled = self.owners_extractor.is_wpt_notify_enabled(
                    metadata_file)
            except KeyError:
                _log.info('KeyError when parsing %s', metadata_file)

            if not is_wpt_notify_enabled:
                _log.info('WPT-NOTIFY disabled in %s.', full_directory)
                continue

            owners = self.owners_extractor.extract_owners(owners_file)
            # owners may be empty but not None.
            cc = owners

            component = self.owners_extractor.extract_component(metadata_file)
            # component could be None.
            components = [component] if component else None

            prologue = ('WPT import {} introduced new failures in {}:\n\n'
                        'List of new failures:\n'.format(
                            gerrit_url, directory))
            failure_list = ''
            for failure in failures:
                failure_list += str(failure) + '\n'

            expectations_statement = (
                '\nExpectations or baseline files [0] have been automatically '
                'added for the failing results to keep the bots green. Please '
                'investigate the new failures and triage as appropriate.\n')

            range_statement = '\nThis import contains upstream changes from {} to {}:\n'.format(
                wpt_revision_start, wpt_revision_end)
            commit_list = self.format_commit_list(imported_commits,
                                                  full_directory)

            links_list = '\n[0]: https://chromium.googlesource.com/chromium/src/+/HEAD/docs/testing/web_test_expectations.md\n'

            description = (prologue + failure_list + expectations_statement +
                           range_statement + commit_list + links_list)

            bug = MonorailIssue.new_chromium_issue(summary,
                                                   description,
                                                   cc,
                                                   components,
                                                   labels=['Test-WebTest'])
            _log.info(bug)
            _log.info("WPT-NOTIFY enabled in %s; adding the bug to the pending list." % full_directory)
            bugs.append(bug)
        return bugs

    def format_commit_list(self, imported_commits, directory):
        """Formats the list of imported WPT commits.

        Imports affecting the given directory will be highlighted.

        Args:
            imported_commits: A list of (SHA, commit subject) pairs.
            directory: An absolute path of a directory in the Chromium repo, for
                which the list is formatted.

        Returns:
            A multi-line string.
        """
        path_from_wpt = self.host.filesystem.relpath(
            directory, self.finder.path_from_web_tests('external', 'wpt'))
        commit_list = ''
        for sha, subject in imported_commits:
            # subject is a Unicode string and can contain non-ASCII characters.
            line = u'{}: {}'.format(subject, GITHUB_COMMIT_PREFIX + sha)
            if self.local_wpt.is_commit_affecting_directory(
                    sha, path_from_wpt):
                line += ' [affecting this directory]'
            commit_list += line + '\n'
        return commit_list

    def find_owned_directory(self, test_name):
        """Finds the lowest directory that contains the test and has OWNERS.

        Args:
            test_name: The name of the test (a path relative to web_tests).

        Returns:
            The path of the found directory relative to web_tests.
        """
        # Always use non-virtual test names when looking up OWNERS.
        if self.default_port.lookup_virtual_test_base(test_name):
            test_name = self.default_port.lookup_virtual_test_base(test_name)
        # find_owners_file takes either a relative path from the *root* of the
        # repository, or an absolute path.
        abs_test_path = self.finder.path_from_web_tests(test_name)
        owners_file = self.owners_extractor.find_owners_file(
            self.host.filesystem.dirname(abs_test_path))
        if not owners_file:
            return None
        owned_directory = self.host.filesystem.dirname(owners_file)
        short_directory = self.host.filesystem.relpath(
            owned_directory, self.finder.web_tests_dir())
        return short_directory

    def file_bugs(self, bugs, dry_run, service_account_key_json=None):
        """Files a list of bugs to Monorail.

        Args:
            bugs: A list of MonorailIssue objects.
            dry_run: A boolean, whether we are in dry run mode.
            service_account_key_json: Optional, see docs for main().
        """
        # TODO(robertma): Better error handling in this method.
        if dry_run:
            _log.info(
                '[dry_run] Would have filed the %d bugs in the pending list.',
                len(bugs))
            return

        _log.info('Filing %d bugs in the pending list to Monorail', len(bugs))
        api = self._get_monorail_api(service_account_key_json)
        for index, bug in enumerate(bugs, start=1):
            response = api.insert_issue(bug)
            _log.info('[%d] Filed bug: %s', index,
                      MonorailIssue.crbug_link(response['id']))

    def _get_monorail_api(self, service_account_key_json):
        if service_account_key_json:
            return self._monorail_api(
                service_account_key_json=service_account_key_json)
        token = LuciAuth(self.host).get_access_token()
        return self._monorail_api(access_token=token)
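
# A minimal usage sketch of the notifier above, mirroring the call made in
# TestImporter.send_notifications below. The host, chromium_git and local_wpt
# objects are assumed to come from the importer's environment; the issue and
# patchset values below are hypothetical.
#
#     notifier = ImportNotifier(host, chromium_git, local_wpt)
#     notifier.main(
#         wpt_revision_start='1234abc',  # last imported revision (hypothetical)
#         wpt_revision_end='5678def',    # newly imported revision (hypothetical)
#         rebaselined_tests=set(),
#         test_expectations={},
#         issue='123456',
#         patchset='2',
#         dry_run=True,                  # log the bugs instead of filing them
#         service_account_key_json=None)
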
class TestImporter(object):
    def __init__(self, host, wpt_github=None, wpt_manifests=None):
        self.host = host
        self.wpt_github = wpt_github
        self.port = host.port_factory.get()

        self.executive = host.executive
        self.fs = host.filesystem
        self.finder = PathFinder(self.fs)
        self.chromium_git = self.host.git(self.finder.chromium_base())
        self.dest_path = self.finder.path_from_web_tests('external', 'wpt')

        # A common.net.git_cl.GitCL instance.
        self.git_cl = None
        # Another Git instance with local WPT as CWD, which can only be
        # instantiated after the working directory is created.
        self.wpt_git = None
        # The WPT revision we are importing and the one imported last time.
        self.wpt_revision = None
        self.last_wpt_revision = None
        # A set of rebaselined tests and a dictionary of new test expectations
        # mapping failing tests to platforms to
        # wpt_expectations_updater.SimpleTestResult.
        self.rebaselined_tests = set()
        self.new_test_expectations = {}
        self.verbose = False

        args = ['--clean-up-affected-tests-only',
                '--clean-up-test-expectations']
        self._expectations_updater = WPTExpectationsUpdater(
            self.host, args, wpt_manifests)

    def main(self, argv=None):
        # TODO(robertma): Test this method! Split it to make it easier to test
        # if necessary.

        options = self.parse_args(argv)

        self.verbose = options.verbose
        log_level = logging.DEBUG if self.verbose else logging.INFO
        configure_logging(logging_level=log_level, include_time=True)

        # Having the full output when executive.run_command fails is useful when
        # investigating a failed import, as all we have are logs.
        self.executive.error_output_limit = None

        if options.auto_update and options.auto_upload:
            _log.error(
                '--auto-upload and --auto-update cannot be used together.')
            return 1

        if not self.checkout_is_okay():
            return 1

        credentials = read_credentials(self.host, options.credentials_json)
        gh_user = credentials.get('GH_USER')
        gh_token = credentials.get('GH_TOKEN')
        if not gh_user or not gh_token:
            _log.warning('You have not set your GitHub credentials. This '
                         'script may fail with a network error when making '
                         'an API request to GitHub.')
            _log.warning('See https://chromium.googlesource.com/chromium/src'
                         '/+/master/docs/testing/web_platform_tests.md'
                         '#GitHub-credentials for instructions on how to set '
                         'your credentials up.')
        self.wpt_github = self.wpt_github or WPTGitHub(self.host, gh_user,
                                                       gh_token)
        self.git_cl = GitCL(
            self.host, auth_refresh_token_json=options.auth_refresh_token_json)

        _log.debug('Noting the current Chromium revision.')
        chromium_revision = self.chromium_git.latest_git_commit()

        # Instantiate Git after local_wpt.fetch() to make sure the path exists.
        local_wpt = LocalWPT(self.host, gh_token=gh_token)
        local_wpt.fetch()
        self.wpt_git = self.host.git(local_wpt.path)

        if options.revision is not None:
            _log.info('Checking out %s', options.revision)
            self.wpt_git.run(['checkout', options.revision])

        _log.debug('Noting the revision we are importing.')
        self.wpt_revision = self.wpt_git.latest_git_commit()
        self.last_wpt_revision = self._get_last_imported_wpt_revision()
        import_commit = 'wpt@%s' % self.wpt_revision

        _log.info('Importing %s to Chromium %s', import_commit,
                  chromium_revision)

        if options.ignore_exportable_commits:
            commit_message = self._commit_message(chromium_revision,
                                                  import_commit)
        else:
            commits = self.apply_exportable_commits_locally(local_wpt)
            if commits is None:
                _log.error('Could not apply some exportable commits cleanly.')
                _log.error('Aborting import to prevent clobbering commits.')
                return 1
            commit_message = self._commit_message(
                chromium_revision,
                import_commit,
                locally_applied_commits=commits)

        self._clear_out_dest_path()

        _log.info('Copying the tests from the temp repo to the destination.')
        test_copier = TestCopier(self.host, local_wpt.path)
        test_copier.do_import()

        # TODO(robertma): Implement `add --all` in Git (it is different from `commit --all`).
        self.chromium_git.run(['add', '--all', self.dest_path])

        # Remove expectations for tests that were deleted and rename tests
        # in expectations for renamed tests.
        self._expectations_updater.cleanup_test_expectations_files()

        self._generate_manifest()

        # TODO(crbug.com/800570 robertma): Re-enable it once we fix the bug.
        # self._delete_orphaned_baselines()

        if not self.chromium_git.has_working_directory_changes():
            _log.info('Done: no changes to import.')
            return 0

        if self._only_wpt_manifest_changed():
            _log.info('Only the manifest was updated; skipping the import.')
            return 0

        self._commit_changes(commit_message)
        _log.info('Changes imported and committed.')

        if not options.auto_upload and not options.auto_update:
            return 0

        self._upload_cl()
        _log.info('Issue: %s', self.git_cl.run(['issue']).strip())

        if not self.update_expectations_for_cl():
            return 1

        if not options.auto_update:
            return 0

        if not self.run_commit_queue_for_cl():
            return 1

        if not self.send_notifications(local_wpt, options.auto_file_bugs,
                                       options.monorail_auth_json):
            return 1

        return 0

    def update_expectations_for_cl(self):
        """Performs the expectation-updating part of an auto-import job.

        This includes triggering try jobs and waiting; then, if applicable,
        writing new baselines and TestExpectation lines, committing, and
        uploading a new patchset.

        This assumes that there is a CL associated with the current branch.

        Returns True if everything is OK to continue, or False on failure.
        """
        _log.info('Triggering try jobs for updating expectations.')
        self.git_cl.trigger_try_jobs(self.blink_try_bots())
        cl_status = self.git_cl.wait_for_try_jobs(
            poll_delay_seconds=POLL_DELAY_SECONDS,
            timeout_seconds=TIMEOUT_SECONDS)

        if not cl_status:
            _log.error('No initial try job results, aborting.')
            self.git_cl.run(['set-close'])
            return False

        if cl_status.status == 'closed':
            _log.error('The CL was closed, aborting.')
            return False

        _log.info('All jobs finished.')
        try_results = cl_status.try_job_results

        if try_results and self.git_cl.some_failed(try_results):
            self.fetch_new_expectations_and_baselines()
            if self.chromium_git.has_working_directory_changes():
                self._generate_manifest()
                message = 'Update test expectations and baselines.'
                self._commit_changes(message)
                self._upload_patchset(message)
        return True

    def run_commit_queue_for_cl(self):
        """Triggers CQ and either commits or aborts; returns True on success."""
        _log.info('Triggering CQ try jobs.')
        self.git_cl.run(['try'])
        cl_status = self.git_cl.wait_for_try_jobs(
            poll_delay_seconds=POLL_DELAY_SECONDS,
            timeout_seconds=TIMEOUT_SECONDS,
            cq_only=True)

        if not cl_status:
            self.git_cl.run(['set-close'])
            _log.error('Timed out waiting for CQ; aborting.')
            return False

        if cl_status.status == 'closed':
            _log.error('The CL was closed; aborting.')
            return False

        _log.info('All jobs finished.')
        cq_try_results = cl_status.try_job_results

        if not cq_try_results:
            _log.error('No CQ try results found; aborting.')
            self.git_cl.run(['set-close'])
            return False

        if not self.git_cl.all_success(cq_try_results):
            _log.error('CQ appears to have failed; aborting.')
            self.git_cl.run(['set-close'])
            return False

        _log.info('CQ appears to have passed; trying to commit.')
        self.git_cl.run(['upload', '-f', '--send-mail'])  # Turn off WIP mode.
        self.git_cl.run(['set-commit'])

        if self.git_cl.wait_for_closed_status():
            _log.info('Update completed.')
            return True

        _log.error('Cannot submit CL; aborting.')
        try:
            self.git_cl.run(['set-close'])
        except ScriptError as e:
            if e.output and 'Conflict: change is merged' in e.output:
                _log.error('CL is already merged; treating as success.')
                return True
            else:
                raise e
        return False

    def blink_try_bots(self):
        """Returns the collection of builders used for updating expectations."""
        return self.host.builders.filter_builders(
            is_try=True, exclude_specifiers={'android'})

    def parse_args(self, argv):
        parser = argparse.ArgumentParser()
        parser.description = __doc__
        parser.add_argument(
            '-v',
            '--verbose',
            action='store_true',
            help='log extra details that may be helpful when debugging')
        parser.add_argument(
            '--ignore-exportable-commits',
            action='store_true',
            help='do not check for exportable commits that would be clobbered')
        parser.add_argument('-r', '--revision', help='target wpt revision')
        parser.add_argument(
            '--auto-upload',
            action='store_true',
            help='upload a CL, update expectations, but do NOT trigger CQ')
        parser.add_argument(
            '--auto-update',
            action='store_true',
            help='upload a CL, update expectations, and trigger CQ')
        parser.add_argument(
            '--auto-file-bugs',
            action='store_true',
            help='file new failures automatically to crbug.com')
        parser.add_argument(
            '--auth-refresh-token-json',
            help='authentication refresh token JSON file used for try jobs, '
            'generally not necessary on developer machines')
        parser.add_argument(
            '--credentials-json',
            help='A JSON file with GitHub credentials, '
            'generally not necessary on developer machines')
        parser.add_argument(
            '--monorail-auth-json',
            help='A JSON file containing the private key of a service account '
            'to access Monorail (crbug.com), only needed when '
            '--auto-file-bugs is used')

        return parser.parse_args(argv)

    def checkout_is_okay(self):
        if self.chromium_git.has_working_directory_changes():
            _log.warning('Checkout is dirty; aborting.')
            return False
        # TODO(robertma): Add a method in Git to query a range of commits.
        local_commits = self.chromium_git.run(
            ['log', '--oneline', 'origin/master..HEAD'])
        if local_commits:
            _log.warning('Checkout has local commits before import.')
        return True

    def apply_exportable_commits_locally(self, local_wpt):
        """Applies exportable Chromium changes to the local WPT repo.

        The purpose of this is to avoid clobbering changes that were made in
        Chromium but not yet merged upstream. By applying these changes to the
        local copy of web-platform-tests before copying files over, we make
        it so that the resulting change in Chromium doesn't undo the
        previous Chromium change.

        Args:
            local_wpt: A LocalWPT instance for our local copy of WPT.

        Returns:
            A list of commits applied (could be empty), or None if any
            of the patches could not be applied cleanly.
        """
        commits = self.exportable_but_not_exported_commits(local_wpt)
        for commit in commits:
            _log.info('Applying exportable commit locally:')
            _log.info(commit.url())
            _log.info('Subject: %s', commit.subject().strip())
            # Log a note about the corresponding PR.
            # This might not be necessary, and could potentially be removed.
            pull_request = self.wpt_github.pr_for_chromium_commit(commit)
            if pull_request:
                _log.info('PR: %spull/%d', WPT_GH_URL, pull_request.number)
            else:
                _log.warning('No pull request found.')
            error = local_wpt.apply_patch(commit.format_patch())
            if error:
                _log.error('Commit cannot be applied cleanly:')
                _log.error(error)
                return None
            self.wpt_git.commit_locally_with_message(
                'Applying patch %s' % commit.sha)
        return commits

    def exportable_but_not_exported_commits(self, local_wpt):
        """Returns a list of commits that would be clobbered by importer.

        The list contains all exportable but not exported commits, not filtered
        by whether they can apply cleanly.
        """
        # The errors returned by exportable_commits_over_last_n_commits are
        # irrelevant and ignored here, because it tests patches *individually*
        # while the importer tries to reapply these patches *cumulatively*.
        commits, _ = exportable_commits_over_last_n_commits(
            self.host,
            local_wpt,
            self.wpt_github,
            require_clean=False,
            verify_merged_pr=True)
        return commits

    def _generate_manifest(self):
        """Generates MANIFEST.json for imported tests.

        Runs the (newly-updated) manifest command if it's found, and then
        stages the generated MANIFEST.json in the git index, ready to commit.
        """
        _log.info('Generating MANIFEST.json')
        WPTManifest.generate_manifest(self.host, self.dest_path)
        manifest_path = self.fs.join(self.dest_path, 'MANIFEST.json')
        assert self.fs.exists(manifest_path)
        manifest_base_path = self.fs.normpath(
            self.fs.join(self.dest_path, '..', BASE_MANIFEST_NAME))
        self.copyfile(manifest_path, manifest_base_path)
        self.chromium_git.add_list([manifest_base_path])

    def _clear_out_dest_path(self):
        """Removes all files that are synced with upstream from Chromium WPT.

        Instead of relying on TestCopier to overwrite these files, cleaning up
        first ensures that if upstream deletes some files, we delete them too.
        """
        _log.info('Cleaning out tests from %s.', self.dest_path)
        should_remove = lambda fs, dirname, basename: (
            is_file_exportable(
                fs.relpath(fs.join(dirname, basename),
                           self.finder.chromium_base())))
        files_to_delete = self.fs.files_under(
            self.dest_path, file_filter=should_remove)
        for subpath in files_to_delete:
            self.remove(self.finder.path_from_web_tests('external', subpath))

    def _commit_changes(self, commit_message):
        _log.info('Committing changes.')
        self.chromium_git.commit_locally_with_message(commit_message)

    def _only_wpt_manifest_changed(self):
        changed_files = self.chromium_git.changed_files()
        wpt_base_manifest = self.fs.relpath(
            self.fs.join(self.dest_path, '..', BASE_MANIFEST_NAME),
            self.finder.chromium_base())
        return changed_files == [wpt_base_manifest]

    def _commit_message(self,
                        chromium_commit_sha,
                        import_commit_sha,
                        locally_applied_commits=None):
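        """Returns the commit message for an import commit."""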
        message = 'Import {}\n\nUsing wpt-import in Chromium {}.\n'.format(
            import_commit_sha, chromium_commit_sha)
        if locally_applied_commits:
            message += 'With Chromium commits locally applied on WPT:\n'
            message += '\n'.join(
                str(commit) for commit in locally_applied_commits)
        message += '\nNo-Export: true'
        return message

    def _delete_orphaned_baselines(self):
        _log.info('Deleting any orphaned baselines.')

        is_baseline_filter = lambda fs, dirname, basename: is_testharness_baseline(basename)

        baselines = self.fs.files_under(
            self.dest_path, file_filter=is_baseline_filter)

        # Note about possible refactoring:
        #  - the manifest path could be factored out to a common location, and
        #  - the logic for reading the manifest could be factored out from here
        # and the Port class.
        manifest_path = self.finder.path_from_web_tests(
            'external', 'wpt', 'MANIFEST.json')
        manifest = WPTManifest(self.fs.read_text_file(manifest_path))
        wpt_urls = manifest.all_urls()

        # Currently baselines for tests with query strings are merged,
        # so that the tests foo.html?r=1 and foo.html?r=2 both have the same
        # baseline, foo-expected.txt.
        # TODO(qyearsley): Remove this when this behavior is fixed.
        wpt_urls = [url.split('?')[0] for url in wpt_urls]

        wpt_dir = self.finder.path_from_web_tests('external', 'wpt')
        for full_path in baselines:
            rel_path = self.fs.relpath(full_path, wpt_dir)
            if not self._has_corresponding_test(rel_path, wpt_urls):
                self.fs.remove(full_path)

    def _has_corresponding_test(self, rel_path, wpt_urls):
        # TODO(qyearsley): Ensure that this works with platform baselines and
        # virtual baselines, and add unit tests.
        base = '/' + rel_path.replace('-expected.txt', '')
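        # e.g. baseline 'foo/bar-expected.txt' -> base '/foo/bar'; the test
        # exists if base plus a supported extension appears in wpt_urls.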
        return any(
            (base + ext) in wpt_urls for ext in Port.supported_file_extensions)

    def copyfile(self, source, destination):
        _log.debug('cp %s %s', source, destination)
        self.fs.copyfile(source, destination)

    def remove(self, dest):
        _log.debug('rm %s', dest)
        self.fs.remove(dest)

    def _upload_patchset(self, message):
        self.git_cl.run(['upload', '-f', '-t', message])

    def _upload_cl(self):
        _log.info('Uploading change list.')
        directory_owners = self.get_directory_owners()
        description = self._cl_description(directory_owners)
        sheriff_email = self.tbr_reviewer()

        temp_file, temp_path = self.fs.open_text_tempfile()
        temp_file.write(description)
        temp_file.close()

        self.git_cl.run([
            'upload',
            '-f',
            '--message-file',
            temp_path,
            '--tbrs',
            sheriff_email,
            # Note: we used to CC all the directory owners, but have stopped
            # in search of a better notification mechanism. (crbug.com/765334)
            '--cc',
            '*****@*****.**',
        ])

        self.fs.remove(temp_path)

    def get_directory_owners(self):
        """Returns a mapping of email addresses to owners of changed tests."""
        _log.info('Gathering directory owners emails to CC.')
        changed_files = self.chromium_git.changed_files()
        extractor = DirectoryOwnersExtractor(self.host)
        return extractor.list_owners(changed_files)

    def _cl_description(self, directory_owners):
        """Returns a CL description string.

        Args:
            directory_owners: A dict mapping tuples of owner names to lists of
                directories.
        """
        # TODO(robertma): Add a method in Git for getting the commit body.
        description = self.chromium_git.run(['log', '-1', '--format=%B'])
        description += (
            'Note to sheriffs: This CL imports external tests and adds\n'
            'expectations for those tests; if this CL is large and causes\n'
            'a few new failures, please fix the failures by adding new\n'
            'lines to TestExpectations rather than reverting. See:\n'
            'https://chromium.googlesource.com'
            '/chromium/src/+/master/docs/testing/web_platform_tests.md\n\n')

        if directory_owners:
            description += self._format_directory_owners(
                directory_owners) + '\n\n'

        # Prevent FindIt from auto-reverting import CLs.
        description += 'NOAUTOREVERT=true\n'

        # Move any No-Export tag to the end of the description.
        description = description.replace('No-Export: true', '')
        description = description.replace('\n\n\n\n', '\n\n')
        description += 'No-Export: true\n'

        # Add the wptrunner MVP tryjobs as blocking trybots, to catch any test
        # changes or infrastructure changes from upstream.
        #
        # If this starts blocking the importer unnecessarily, revert
        # https://chromium-review.googlesource.com/c/chromium/src/+/2451504
        description += (
            'Cq-Include-Trybots: luci.chromium.try:linux-wpt-identity-fyi-rel,'
            'linux-wpt-payments-fyi-rel')

        return description

    @staticmethod
    def _format_directory_owners(directory_owners):
        message_lines = ['Directory owners for changes in this CL:']
        for owner_tuple, directories in sorted(directory_owners.items()):
            message_lines.append(', '.join(owner_tuple) + ':')
            message_lines.extend('  ' + d for d in directories)
        return '\n'.join(message_lines)

    def tbr_reviewer(self):
        """Returns the email address to use as the reviewer.

        This tries to fetch the current ecosystem infra sheriff, but falls back
        in case of error.
        """
        email = ''
        try:
            email = self._fetch_ecosystem_infra_sheriff_email()
        except (IOError, KeyError, ValueError) as error:
            _log.error('Exception while fetching current sheriff: %s', error)
        if email in ['*****@*****.**']:
            _log.warning('Cannot TBR by %s: not a committer', email)
            email = ''
        return email or TBR_FALLBACK

    def _fetch_ecosystem_infra_sheriff_email(self):
        try:
            content = self.host.web.get_binary(ROTATIONS_URL)
        except NetworkTimeout:
            _log.error('Cannot fetch %s', ROTATIONS_URL)
            return ''
        data = json.loads(content)
        if not data.get('emails'):
            _log.error(
                'No email found for current sheriff. Retrieved content: %s',
                content)
            return ''
        return data['emails'][0]

    def fetch_new_expectations_and_baselines(self):
        """Modifies expectation lines and baselines based on try job results.

        Assuming that there are some try job results available, this
        adds new expectation lines to TestExpectations and downloads new
        baselines based on the try job results.

        This is the same as invoking the `wpt-update-expectations` script.
        """
        _log.info('Adding test expectations lines to TestExpectations.')
        self.rebaselined_tests, self.new_test_expectations = (
            self._expectations_updater.update_expectations())

    def _get_last_imported_wpt_revision(self):
        """Finds the last imported WPT revision."""
        # TODO(robertma): Only match commit subjects.
        output = self.chromium_git.most_recent_log_matching(
            '^Import wpt@', self.finder.chromium_base())
        # No line-start anchor (^) below because of the formatting of output.
        result = re.search(r'Import wpt@(\w+)', output)
        if result:
            return result.group(1)
        else:
            _log.error('Cannot find last WPT import.')
            return None

    def send_notifications(self, local_wpt, auto_file_bugs,
                           monorail_auth_json):
        issue = self.git_cl.run(['status', '--field=id']).strip()
        patchset = self.git_cl.run(['status', '--field=patch']).strip()
        # Construct the notifier here so that any errors won't affect the import.
        notifier = ImportNotifier(self.host, self.chromium_git, local_wpt)
        notifier.main(
            self.last_wpt_revision,
            self.wpt_revision,
            self.rebaselined_tests,
            self.new_test_expectations,
            issue,
            patchset,
            dry_run=not auto_file_bugs,
            service_account_key_json=monorail_auth_json)
        return True
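
# The importer's command-line surface is defined entirely by parse_args above.
# A quick sketch of how the flags combine (the `host` object is assumed; a
# real run also needs a Chromium checkout):
#
#     importer = TestImporter(host)
#     options = importer.parse_args(['-v', '--auto-upload'])
#     assert options.verbose and options.auto_upload
#     assert not options.auto_update  # --auto-update would also trigger CQ
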
Exemple #13
0
    def test_path_from_web_tests(self):
        finder = PathFinder(MockFileSystem())
        self.assertEqual(
            finder.path_from_web_tests('external', 'wpt'),
            '/mock-checkout/' + RELATIVE_WEB_TESTS + 'external/wpt')

class ImportNotifier(object):
    def __init__(self, host, chromium_git, local_wpt):
        self.host = host
        self.git = chromium_git
        self.local_wpt = local_wpt

        self._monorail_api = MonorailAPI
        self.default_port = host.port_factory.get()
        self.finder = PathFinder(host.filesystem)
        self.owners_extractor = DirectoryOwnersExtractor(host.filesystem)
        self.new_failures_by_directory = defaultdict(list)

    def main(self,
             wpt_revision_start,
             wpt_revision_end,
             rebaselined_tests,
             test_expectations,
             issue,
             patchset,
             dry_run=True,
             service_account_key_json=None):
        """Files bug reports for new failures.

        Args:
            wpt_revision_start: The start of the imported WPT revision range
                (exclusive), i.e. the last imported revision.
            wpt_revision_end: The end of the imported WPT revision range
                (inclusive), i.e. the current imported revision.
            rebaselined_tests: A list of test names that have been rebaselined.
            test_expectations: A dictionary mapping names of tests that cannot
                be rebaselined to a list of new test expectation lines.
            issue: The issue number of the import CL (a string).
            patchset: The patchset number of the import CL (a string).
            dry_run: If True, no bugs will be actually filed to crbug.com.
            service_account_key_json: The path to a JSON private key of a
                service account for accessing Monorail. If None, try to get an
                access token from luci-auth.

        Note: "test names" are paths of the tests relative to web_tests.
        """
        gerrit_url = SHORT_GERRIT_PREFIX + issue
        gerrit_url_with_ps = gerrit_url + '/' + patchset + '/'

        changed_test_baselines = self.find_changed_baselines_of_tests(
            rebaselined_tests)
        self.examine_baseline_changes(changed_test_baselines,
                                      gerrit_url_with_ps)
        self.examine_new_test_expectations(test_expectations)

        bugs = self.create_bugs_from_new_failures(wpt_revision_start,
                                                  wpt_revision_end, gerrit_url)
        self.file_bugs(bugs, dry_run, service_account_key_json)

    def find_changed_baselines_of_tests(self, rebaselined_tests):
        """Finds the corresponding changed baselines of each test.

        Args:
            rebaselined_tests: A list of test names that have been rebaselined.

        Returns:
            A dictionary mapping test names to paths of their baselines changed
            in this import CL (paths relative to the root of Chromium repo).
        """
        test_baselines = {}
        changed_files = self.git.changed_files()
        for test_name in rebaselined_tests:
            test_without_ext, _ = self.host.filesystem.splitext(test_name)
            changed_baselines = []
            # TODO(robertma): Refactor this into web_tests.port.base.
            baseline_name = test_without_ext + '-expected.txt'
            for changed_file in changed_files:
                if changed_file.endswith(baseline_name):
                    changed_baselines.append(changed_file)
            if changed_baselines:
                test_baselines[test_name] = changed_baselines
        return test_baselines

    def examine_baseline_changes(self, changed_test_baselines,
                                 gerrit_url_with_ps):
        """Examines all changed baselines to find new failures.

        Args:
            changed_test_baselines: A dictionary mapping test names to paths of
                changed baselines.
            gerrit_url_with_ps: Gerrit URL of this CL with the patchset number.
        """
        for test_name, changed_baselines in changed_test_baselines.items():
            directory = self.find_owned_directory(test_name)
            if not directory:
                _log.warning('Cannot find OWNERS of %s', test_name)
                continue

            for baseline in changed_baselines:
                if self.more_failures_in_baseline(baseline):
                    self.new_failures_by_directory[directory].append(
                        TestFailure(
                            TestFailure.BASELINE_CHANGE,
                            test_name,
                            baseline_path=baseline,
                            gerrit_url_with_ps=gerrit_url_with_ps))

    def more_failures_in_baseline(self, baseline):
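        """Returns True if the diff adds more FAIL lines than it removes."""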
        diff = self.git.run(['diff', '-U0', 'origin/master', '--', baseline])
        delta_failures = 0
        for line in diff.splitlines():
            if line.startswith('+FAIL'):
                delta_failures += 1
            if line.startswith('-FAIL'):
                delta_failures -= 1
        return delta_failures > 0

    def examine_new_test_expectations(self, test_expectations):
        """Examines new test expectations to find new failures.

        Args:
            test_expectations: A dictionary mapping names of tests that cannot
                be rebaselined to a list of new test expectation lines.
        """
        for test_name, expectation_lines in test_expectations.items():
            directory = self.find_owned_directory(test_name)
            if not directory:
                _log.warning('Cannot find OWNERS of %s', test_name)
                continue

            for expectation_line in expectation_lines:
                self.new_failures_by_directory[directory].append(
                    TestFailure(
                        TestFailure.NEW_EXPECTATION,
                        test_name,
                        expectation_line=expectation_line))

    def create_bugs_from_new_failures(self, wpt_revision_start,
                                      wpt_revision_end, gerrit_url):
        """Files bug reports for new failures.

        Args:
            wpt_revision_start: The start of the imported WPT revision range
                (exclusive), i.e. the last imported revision.
            wpt_revision_end: The end of the imported WPT revision range
                (inclusive), i.e. the current imported revision.
            gerrit_url: Gerrit URL of the CL.

        Return:
            A list of MonorailIssue objects that should be filed.
        """
        imported_commits = self.local_wpt.commits_in_range(
            wpt_revision_start, wpt_revision_end)
        bugs = []
        for directory, failures in self.new_failures_by_directory.items():
            summary = '[WPT] New failures introduced in {} by import {}'.format(
                directory, gerrit_url)

            full_directory = self.host.filesystem.join(
                self.finder.web_tests_dir(), directory)
            owners_file = self.host.filesystem.join(full_directory, 'OWNERS')
            is_wpt_notify_enabled = self.owners_extractor.is_wpt_notify_enabled(
                owners_file)

            owners = self.owners_extractor.extract_owners(owners_file)
            # owners may be empty but not None.
            cc = owners

            component = self.owners_extractor.extract_component(owners_file)
            # component could be None.
            components = [component] if component else None

            prologue = ('WPT import {} introduced new failures in {}:\n\n'
                        'List of new failures:\n'.format(
                            gerrit_url, directory))
            failure_list = ''
            for failure in failures:
                failure_list += str(failure) + '\n'

            epilogue = '\nThis import contains upstream changes from {} to {}:\n'.format(
                wpt_revision_start, wpt_revision_end)
            commit_list = self.format_commit_list(imported_commits,
                                                  full_directory)

            description = prologue + failure_list + epilogue + commit_list

            bug = MonorailIssue.new_chromium_issue(summary, description, cc,
                                                   components)
            _log.info(str(bug))

            if is_wpt_notify_enabled:
                _log.info(
                    "WPT-NOTIFY enabled in this directory; adding the bug to the pending list."
                )
                bugs.append(bug)
            else:
                _log.info(
                    "WPT-NOTIFY disabled in this directory; discarding the bug."
                )
        return bugs

    def format_commit_list(self, imported_commits, directory):
        """Formats the list of imported WPT commits.

        Imports affecting the given directory will be highlighted.

        Args:
            imported_commits: A list of (SHA, commit subject) pairs.
            directory: An absolute path of a directory in the Chromium repo, for
                which the list is formatted.

        Returns:
            A multi-line string.
        """
        path_from_wpt = self.host.filesystem.relpath(
            directory, self.finder.path_from_web_tests('external', 'wpt'))
        commit_list = ''
        for sha, subject in imported_commits:
            # subject is a Unicode string and can contain non-ASCII characters.
            line = u'{}: {}'.format(subject, GITHUB_COMMIT_PREFIX + sha)
            if self.local_wpt.is_commit_affecting_directory(
                    sha, path_from_wpt):
                line += ' [affecting this directory]'
            commit_list += line + '\n'
        return commit_list

    def find_owned_directory(self, test_name):
        """Finds the lowest directory that contains the test and has OWNERS.

        Args:
            test_name: The name of the test (a path relative to web_tests).

        Returns:
            The path of the found directory relative to web_tests.
        """
        # Always use non-virtual test names when looking up OWNERS.
        if self.default_port.lookup_virtual_test_base(test_name):
            test_name = self.default_port.lookup_virtual_test_base(test_name)
        # find_owners_file takes either a relative path from the *root* of the
        # repository, or an absolute path.
        abs_test_path = self.finder.path_from_web_tests(test_name)
        owners_file = self.owners_extractor.find_owners_file(
            self.host.filesystem.dirname(abs_test_path))
        if not owners_file:
            return None
        owned_directory = self.host.filesystem.dirname(owners_file)
        short_directory = self.host.filesystem.relpath(
            owned_directory, self.finder.web_tests_dir())
        return short_directory

    def file_bugs(self, bugs, dry_run, service_account_key_json=None):
        """Files a list of bugs to Monorail.

        Args:
            bugs: A list of MonorailIssue objects.
            dry_run: A boolean, whether we are in dry run mode.
            service_account_key_json: Optional, see docs for main().
        """
        # TODO(robertma): Better error handling in this method.
        if dry_run:
            _log.info(
                '[dry_run] Would have filed the %d bugs in the pending list.',
                len(bugs))
            return

        _log.info('Filing %d bugs in the pending list to Monorail', len(bugs))
        api = self._get_monorail_api(service_account_key_json)
        for index, bug in enumerate(bugs, start=1):
            response = api.insert_issue(bug)
            _log.info('[%d] Filed bug: %s', index,
                      MonorailIssue.crbug_link(response['id']))

    def _get_monorail_api(self, service_account_key_json):
        if service_account_key_json:
            return self._monorail_api(
                service_account_key_json=service_account_key_json)
        token = LuciAuth(self.host).get_access_token()
        return self._monorail_api(access_token=token)
Exemple #15
0
def absolute_chromium_wpt_dir(host):
    finder = PathFinder(host.filesystem)
    return finder.path_from_web_tests('external', 'wpt')
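
# Usage sketch (any `host` object with a filesystem works, real or mock):
#
#     wpt_dir = absolute_chromium_wpt_dir(host)
#     # With a mock filesystem, this resolves to
#     # '/mock-checkout/' + RELATIVE_WEB_TESTS + 'external/wpt'.
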
Exemple #16
0
class TryFlag(object):

    def __init__(self, argv, host, git_cl):
        self._args = parse_args(argv)
        self._host = host
        self._git_cl = git_cl
        self._expectations_model = TestExpectationsModel()
        self._test_configuration_converter = TestConfigurationConverter(
            set(BUILDER_CONFIGS.values()),
            self._host.port_factory.get().configuration_specifier_macros())
        self._filesystem = self._host.filesystem
        self._path_finder = PathFinder(self._filesystem)
        self._git = self._host.git()

    def _force_flag_for_test_runner(self):
        flag = self._args.flag
        path = self._path_finder.path_from_web_tests(FLAG_FILE)
        self._filesystem.write_text_file(path, flag + '\n')
        self._git.add_list([path])
        self._git.commit_locally_with_message(
            'Flag try job: force %s for run_web_tests.py.' % flag)

    def _flag_expectations_path(self):
        return self._path_finder.path_from_web_tests(
            'FlagExpectations', self._args.flag.lstrip('-'))

    def _clear_expectations(self):
        path = self._flag_expectations_path()
        self._filesystem.write_text_file(path, '')
        self._git.add_list([path])
        self._git.commit_locally_with_message(
            'Flag try job: clear expectations for %s.' % self._args.flag)

    def _tests_in_flag_expectations(self):
        result = set()
        path = self._flag_expectations_path()
        for line in self._filesystem.read_text_file(path).split('\n'):
            expectation_line = TestExpectationLine.tokenize_line(
                path, line, 0, self._host.port_factory.get())
            test_name = expectation_line.name
            if test_name:
                result.add(test_name)
        return result

    def trigger(self):
        self._force_flag_for_test_runner()
        if self._args.regenerate:
            self._clear_expectations()
        self._git_cl.run(['upload', '--bypass-hooks', '-f',
                          '-m', 'Flag try job for %s.' % self._args.flag])
        for builder in sorted(BUILDER_BUCKETS):
            bucket = BUILDER_BUCKETS[builder]
            self._git_cl.trigger_try_jobs([builder], bucket)

    def _create_expectation_line(self, result, test_configuration):
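        """Builds a TestExpectationLine for an unexpected result."""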
        test_name = result.test_name()
        line = TestExpectationLine()
        line.name = test_name
        line.path = test_name
        line.matching_tests = [test_name]
        line.filename = ''
        if self._args.bug:
            line.bugs = ['crbug.com/%s' % self._args.bug]
        else:
            line.bugs = ['Bug(none)']
        line.expectations = result.actual_results().split()
        line.parsed_expectations = [
            TestExpectations.expectation_from_string(expectation)
            for expectation in line.expectations]
        line.specifiers = [test_configuration.version]
        line.matching_configurations = set([test_configuration])
        return line

    def _process_result(self, build, result):
        if not result.did_run_as_expected():
            self._expectations_model.add_expectation_line(
                self._create_expectation_line(
                    result,
                    BUILDER_CONFIGS[build.builder_name]),
                model_all_expectations=True)

    def update(self):
        self._host.print_('Fetching results...')
        # TODO: Get jobs from the _tryflag branch. Current branch for now.
        jobs = self._git_cl.latest_try_jobs(builder_names=BUILDER_CONFIGS.keys())
        results_fetcher = self._host.results_fetcher
        for build in sorted(jobs):
            self._host.print_('-- %s: %s/results.html' % (
                BUILDER_CONFIGS[build.builder_name].version,
                results_fetcher.results_url(build.builder_name, build.build_number)))
            results = results_fetcher.fetch_results(build, True)
            results.for_each_test(
                lambda result, b=build: self._process_result(b, result))

        # TODO: Write to flag expectations file. For now, stdout. :)
        unexpected_failures = []
        unexpected_passes = []
        tests_in_flag_expectations = self._tests_in_flag_expectations()
        for line in self._expectations_model.all_lines():
            is_pass = (TestExpectations.EXPECTATIONS['pass'] in
                       line.parsed_expectations)
            if not is_pass:
                unexpected_failures.append(line)
            elif line.name in tests_in_flag_expectations:
                unexpected_passes.append(line)

        self._print_all(unexpected_passes, 'unexpected passes')
        self._print_all(unexpected_failures, 'unexpected failures')

    def _print_all(self, lines, description):
        self._host.print_('\n### %s %s:\n' % (len(lines), description))
        for line in lines:
            self._host.print_(line.to_string(
                self._test_configuration_converter))

    def run(self):
        action = self._args.action
        if action == 'trigger':
            self.trigger()
        elif action == 'update':
            self.update()
        else:
            print('specify "trigger" or "update"', file=self._host.stderr)
            return 1
        return 0


class TestCopier(object):
    def __init__(self, host, source_repo_path):
        """Initializes variables to prepare for copying and converting files.

        Args:
            host: An instance of Host.
            source_repo_path: Path to the local checkout of web-platform-tests.
        """
        self.host = host

        assert self.host.filesystem.exists(source_repo_path)
        self.source_repo_path = source_repo_path

        self.filesystem = self.host.filesystem
        self.path_finder = PathFinder(self.filesystem)
        self.web_tests_dir = self.path_finder.web_tests_dir()
        self.destination_directory = self.filesystem.normpath(
            self.filesystem.join(
                self.web_tests_dir, DEST_DIR_NAME,
                self.filesystem.basename(self.source_repo_path)))
        self.import_in_place = (
            self.source_repo_path == self.destination_directory)
        self.dir_above_repo = self.filesystem.dirname(self.source_repo_path)

        self.import_list = []

        # This is just an FYI list of CSS properties that still need to be
        # prefixed, which may be logged after importing.
        self._prefixed_properties = {}

    def do_import(self):
        _log.info('Importing %s into %s', self.source_repo_path,
                  self.destination_directory)
        self.find_importable_tests()
        self.import_tests()

    def find_importable_tests(self):
        """Walks through the source directory to find what tests should be imported.

        This function sets self.import_list, which contains information about how many
        tests are being imported, and their source and destination paths.
        """
        paths_to_skip = self.find_paths_to_skip()

        for root, dirs, files in self.filesystem.walk(self.source_repo_path):
            cur_dir = root.replace(self.dir_above_repo + '/', '') + '/'
            _log.debug('Scanning %s...', cur_dir)

            dirs_to_skip = ('.git', )

            if dirs:
                for name in dirs_to_skip:
                    if name in dirs:
                        dirs.remove(name)

                for path in paths_to_skip:
                    path_base = path.replace(DEST_DIR_NAME + '/', '')
                    path_base = path_base.replace(cur_dir, '')
                    path_full = self.filesystem.join(root, path_base)
                    if path_base in dirs:
                        _log.info('Skipping: %s', path_full)
                        dirs.remove(path_base)
                        if self.import_in_place:
                            self.filesystem.rmtree(path_full)

            copy_list = []

            for filename in files:
                path_full = self.filesystem.join(root, filename)
                path_base = path_full.replace(self.source_repo_path + '/', '')
                path_base = self.destination_directory.replace(
                    self.web_tests_dir + '/', '') + '/' + path_base
                if path_base in paths_to_skip:
                    if self.import_in_place:
                        _log.debug('Pruning: %s', path_base)
                        self.filesystem.remove(path_full)
                    continue
                # FIXME: This block should really be a separate function, but the early-continues make that difficult.

                if is_basename_skipped(filename):
                    _log.debug('Skipping: %s', path_full)
                    _log.debug(
                        '  Reason: This file may cause Chromium presubmit to fail.'
                    )
                    continue

                copy_list.append({'src': path_full, 'dest': filename})

            if copy_list:
                # Only add this directory to the list if there's something to import
                self.import_list.append({
                    'dirname': root,
                    'copy_list': copy_list
                })

    def find_paths_to_skip(self):
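        """Returns the set of paths marked SKIP in W3CImportExpectations."""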
        paths_to_skip = set()
        port = self.host.port_factory.get()
        w3c_import_expectations_path = self.path_finder.path_from_web_tests(
            'W3CImportExpectations')
        w3c_import_expectations = self.filesystem.read_text_file(
            w3c_import_expectations_path)
        parser = TestExpectationParser(port, all_tests=(), is_lint_mode=False)
        expectation_lines = parser.parse(w3c_import_expectations_path,
                                         w3c_import_expectations)
        for line in expectation_lines:
            if 'SKIP' in line.expectations:
                if line.specifiers:
                    _log.warning(
                        'W3CImportExpectations:%s should not have any specifiers',
                        line.line_numbers)
                    continue
                paths_to_skip.add(line.name)
        return paths_to_skip

    def import_tests(self):
        """Reads |self.import_list|, and converts and copies files to their destination."""
        for dir_to_copy in self.import_list:
            if not dir_to_copy['copy_list']:
                continue

            orig_path = dir_to_copy['dirname']

            relative_dir = self.filesystem.relpath(orig_path,
                                                   self.source_repo_path)
            dest_dir = self.filesystem.join(self.destination_directory,
                                            relative_dir)

            if not self.filesystem.exists(dest_dir):
                self.filesystem.maybe_make_directory(dest_dir)

            for file_to_copy in dir_to_copy['copy_list']:
                self.copy_file(file_to_copy, dest_dir)

        _log.info('')
        _log.info('Import complete')
        _log.info('')

        if self._prefixed_properties:
            _log.info('Properties needing prefixes (by count):')
            for prefixed_property in sorted(
                    self._prefixed_properties,
                    key=lambda p: self._prefixed_properties[p]):
                _log.info('  %s: %s', prefixed_property,
                          self._prefixed_properties[prefixed_property])

    def copy_file(self, file_to_copy, dest_dir):
        """Converts and copies a file, if it should be copied.

        Args:
            file_to_copy: A dict in a file copy list constructed by
                find_importable_tests, which represents one file to copy, including
                the keys:
                    "src": Absolute path to the source location of the file.
                    "destination": File name of the destination file.
                And possibly also the keys "reference_support_info" or "is_jstest".
            dest_dir: Path to the directory where the file should be copied.
        """
        source_path = self.filesystem.normpath(file_to_copy['src'])
        dest_path = self.filesystem.join(dest_dir, file_to_copy['dest'])

        if self.filesystem.isdir(source_path):
            _log.error('%s refers to a directory', source_path)
            return

        if not self.filesystem.exists(source_path):
            _log.error('%s not found. Possible error in the test.',
                       source_path)
            return

        if not self.filesystem.exists(self.filesystem.dirname(dest_path)):
            if not self.import_in_place:
                self.filesystem.maybe_make_directory(
                    self.filesystem.dirname(dest_path))

        relpath = self.filesystem.relpath(dest_path, self.web_tests_dir)
        # FIXME: Maybe doing a file diff is in order here for existing files?
        # In other words, there's no sense in overwriting identical files, but
        # there's no harm in copying the identical thing.
        _log.debug('  copying %s', relpath)

        if not self.import_in_place:
            self.filesystem.copyfile(source_path, dest_path)
            if self.filesystem.read_binary_file(source_path)[:2] == b'#!':
                self.filesystem.make_executable(dest_path)
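
# A minimal usage sketch for TestCopier; the `host` object and the path to a
# local WPT checkout are assumed:
#
#     copier = TestCopier(host, '/src/wpt')
#     copier.do_import()  # find_importable_tests(), then import_tests()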