Example #1
0
    def __init__(self, host, source_directory, options):
        """Set up importer state: WebKit paths plus cached resource-files and
        tests-options JSON, optionally reset for a clean destination."""
        self.host = host
        self.source_directory = source_directory
        self.options = options
        self.filesystem = self.host.filesystem

        finder = WebKitFinder(self.filesystem)
        self._webkit_root = finder.webkit_base()

        self.destination_directory = finder.path_from_webkit_base("LayoutTests", options.destination)
        self.tests_w3c_relative_path = self.filesystem.join('imported', 'w3c')
        self.layout_tests_path = finder.path_from_webkit_base('LayoutTests')
        self.layout_tests_w3c_path = self.filesystem.join(self.layout_tests_path, self.tests_w3c_relative_path)
        self.tests_download_path = finder.path_from_webkit_base('WebKitBuild', 'w3c-tests')

        self._test_downloader = None
        self._potential_test_resource_files = []
        self.import_list = []
        # No explicit source directory means the tests were downloaded.
        self._importing_downloaded_tests = source_directory is None

        self._test_resource_files_json_path = self.filesystem.join(self.layout_tests_w3c_path, "resources", "resource-files.json")
        if self.filesystem.exists(self._test_resource_files_json_path):
            self._test_resource_files = json.loads(self.filesystem.read_text_file(self._test_resource_files_json_path))
        else:
            self._test_resource_files = None

        self._tests_options_json_path = self.filesystem.join(self.layout_tests_path, 'tests-options.json')
        if self.filesystem.exists(self._tests_options_json_path):
            self._tests_options = json.loads(self.filesystem.read_text_file(self._tests_options_json_path))
        else:
            self._tests_options = None
        self._slow_tests = []

        if self.options.clean_destination_directory and self._test_resource_files:
            self._test_resource_files["files"] = []
            if self._tests_options:
                self.remove_slow_from_w3c_tests_options()
Example #2
0
    def __init__(self, host, test_paths, options):
        """Set up importer state for the given test paths: WebKit paths plus
        cached resource-files and tests-options JSON."""
        self.host = host
        self.source_directory = options.source
        self.options = options
        # Falsy test_paths (None or empty) normalizes to an empty list.
        self.test_paths = test_paths or []
        self.filesystem = self.host.filesystem

        finder = WebKitFinder(self.filesystem)
        self._webkit_root = finder.webkit_base()

        self.destination_directory = finder.path_from_webkit_base("LayoutTests", options.destination)
        self.tests_w3c_relative_path = self.filesystem.join('imported', 'w3c')
        self.layout_tests_path = finder.path_from_webkit_base('LayoutTests')
        self.layout_tests_w3c_path = self.filesystem.join(self.layout_tests_path, self.tests_w3c_relative_path)
        self.tests_download_path = finder.path_from_webkit_base('WebKitBuild', 'w3c-tests')

        self._test_downloader = None
        self._potential_test_resource_files = []
        self.import_list = []
        # No source directory in the options means the tests were downloaded.
        self._importing_downloaded_tests = self.source_directory is None

        self._test_resource_files_json_path = self.filesystem.join(self.layout_tests_w3c_path, "resources", "resource-files.json")
        if self.filesystem.exists(self._test_resource_files_json_path):
            self._test_resource_files = json.loads(self.filesystem.read_text_file(self._test_resource_files_json_path))
        else:
            self._test_resource_files = None

        self._tests_options_json_path = self.filesystem.join(self.layout_tests_path, 'tests-options.json')
        if self.filesystem.exists(self._tests_options_json_path):
            self._tests_options = json.loads(self.filesystem.read_text_file(self._tests_options_json_path))
        else:
            self._tests_options = None
        self._slow_tests = []

        if self.options.clean_destination_directory and self._test_resource_files:
            self._test_resource_files["files"] = []
            if self._tests_options:
                self.remove_slow_from_w3c_tests_options()
Example #3
0
 def _determine_driver_path_statically(cls, host, options):
     """Return the statically-determined build path to the Sky shell driver.

     Uses the options' build_directory/configuration when provided,
     falling back to the default configuration otherwise.
     """
     config_object = config.Config(host.executive, host.filesystem)
     build_directory = getattr(options, "build_directory", None)
     finder = WebKitFinder(host.filesystem)
     chromium_base = finder.chromium_base()
     driver_name = cls.SKY_SHELL_NAME
     # Prefer an explicitly requested configuration; an absent or falsy
     # value falls back to the default configuration.
     if hasattr(options, "configuration") and options.configuration:
         configuration = options.configuration
     else:
         configuration = config_object.default_configuration()
     return cls._static_build_path(host.filesystem, build_directory, chromium_base, configuration, [driver_name])
    def __init__(self, host, source_directory, options):
        """Record the import source and compute the LayoutTests destination."""
        self.host = host
        self.source_directory = source_directory
        self.options = options
        self.filesystem = self.host.filesystem

        finder = WebKitFinder(self.filesystem)
        self._webkit_root = finder.webkit_base()
        self.destination_directory = finder.path_from_webkit_base(
            "LayoutTests", options.destination)

        self.import_list = []
Example #5
0
    def __init__(self, host, source_directory, options):
        """Capture the import inputs and derive where tests will land."""
        self.host = host
        self.source_directory = source_directory
        self.options = options
        self.filesystem = self.host.filesystem

        webkit_finder = WebKitFinder(self.filesystem)
        self._webkit_root = webkit_finder.webkit_base()
        # Destination lives under LayoutTests/<options.destination>.
        self.destination_directory = webkit_finder.path_from_webkit_base(
            "LayoutTests", options.destination)

        self.import_list = []
Example #6
0
 def _determine_driver_path_statically(cls, host, options):
     """Return the statically-determined build path to the driver binary.

     Honors options' build_directory, driver_name and configuration when
     set; otherwise defaults to content shell and the default configuration.
     """
     config_object = config.Config(host.executive, host.filesystem)
     build_directory = getattr(options, 'build_directory', None)
     finder = WebKitFinder(host.filesystem)
     chromium_base = finder.chromium_base()
     driver_name = getattr(options, 'driver_name', None)
     if driver_name is None:
         driver_name = cls.CONTENT_SHELL_NAME
     # Prefer an explicitly requested configuration; an absent or falsy
     # value falls back to the default configuration.
     if hasattr(options, 'configuration') and options.configuration:
         configuration = options.configuration
     else:
         configuration = config_object.default_configuration()
     return cls._static_build_path(host.filesystem, build_directory, chromium_base, configuration, [driver_name])
Example #7
0
    def __init__(self, host, source_directory, repo_dir, options):
        """Store importer inputs and derive the LayoutTests destination."""
        self.host = host
        self.source_directory = source_directory
        self.options = options
        self.filesystem = self.host.filesystem

        finder = WebKitFinder(self.filesystem)
        self._webkit_root = finder.webkit_base()
        self.repo_dir = repo_dir
        self.destination_directory = finder.path_from_webkit_base(
            "LayoutTests", options.destination)

        # Sentinel until a real changeset is determined.
        self.changeset = CHANGESET_NOT_AVAILABLE

        self.import_list = []
    def __init__(self, host, source_directory, repo_dir, options):
        """Store importer inputs, destination path, and status sentinels."""
        self.host = host
        self.source_directory = source_directory
        self.options = options
        self.filesystem = self.host.filesystem

        finder = WebKitFinder(self.filesystem)
        self._webkit_root = finder.webkit_base()
        self.repo_dir = repo_dir
        self.destination_directory = finder.path_from_webkit_base(
            "LayoutTests", options.destination)

        # Sentinels until real changeset / test status values are known.
        self.changeset = CHANGESET_NOT_AVAILABLE
        self.test_status = TEST_STATUS_UNKNOWN

        self.import_list = []
Example #9
0
    def __init__(self, host, source_directory, repo_dir, options):
        """Store importer inputs; destination is keyed by the repo basename."""
        self.host = host
        self.source_directory = source_directory
        self.options = options
        self.filesystem = self.host.filesystem

        finder = WebKitFinder(self.filesystem)
        self._webkit_root = finder.webkit_base()
        # Normalize so basename() below is meaningful for relative inputs.
        self.repo_dir = self.filesystem.abspath(repo_dir)
        self.destination_directory = finder.path_from_webkit_base(
            "LayoutTests", options.destination,
            self.filesystem.basename(self.repo_dir))

        # Sentinels until real changeset / test status values are known.
        self.changeset = CHANGESET_NOT_AVAILABLE
        self.test_status = TEST_STATUS_UNKNOWN

        self.import_list = []
Example #10
0
    def __init__(self, host, source_directory, options):
        """Set up importer state: destination, w3c paths, download cache."""
        self.host = host
        self.source_directory = source_directory
        self.options = options
        self.filesystem = self.host.filesystem

        finder = WebKitFinder(self.filesystem)
        self._webkit_root = finder.webkit_base()

        self.destination_directory = finder.path_from_webkit_base(
            "LayoutTests", options.destination)
        self.layout_tests_w3c_path = finder.path_from_webkit_base(
            'LayoutTests', 'imported', 'w3c')
        self.tests_download_path = finder.path_from_webkit_base(
            'WebKitBuild', 'w3c-tests')

        self._test_downloader = None
        self.import_list = []
        # No explicit source directory means the tests were downloaded.
        self._importing_downloaded_tests = source_directory is None
Example #11
0
    def __init__(self, host, source_directory, options):
        """Set up importer state: destination, w3c paths, download cache."""
        self.host = host
        self.source_directory = source_directory
        self.options = options
        self.filesystem = self.host.filesystem

        finder = WebKitFinder(self.filesystem)
        self._webkit_root = finder.webkit_base()

        self.destination_directory = finder.path_from_webkit_base(
            "LayoutTests", options.destination)
        self.tests_w3c_relative_path = self.filesystem.join("imported", "w3c")
        self.layout_tests_w3c_path = finder.path_from_webkit_base(
            "LayoutTests", self.tests_w3c_relative_path)
        self.tests_download_path = finder.path_from_webkit_base(
            "WebKitBuild", "w3c-tests")

        self._test_downloader = None
        self._potential_test_resource_files = []
        self.import_list = []
        # No explicit source directory means the tests were downloaded.
        self._importing_downloaded_tests = source_directory is None
Example #12
0
class TestImporter(object):
    """Imports W3C-style tests from a source repository into LayoutTests.

    The importer walks the directory to import, decides which files are
    tests, reference files, or support files, converts HTML/XML/CSS content
    for WebKit where necessary, and copies the results into the destination
    directory under LayoutTests.
    """

    def __init__(self, host, dir_to_import, top_of_repo, options):
        """Args:
            host: provides filesystem, executive and port_factory.
            dir_to_import: directory to scan for importable tests.
            top_of_repo: root of the source repository.
            options: parsed command-line options (destination, dry_run, ...).
        """
        self.host = host
        self.dir_to_import = dir_to_import
        self.top_of_repo = top_of_repo
        self.options = options

        self.filesystem = self.host.filesystem
        self.webkit_finder = WebKitFinder(self.filesystem)
        self._webkit_root = self.webkit_finder.webkit_base()
        self.layout_tests_dir = self.webkit_finder.path_from_webkit_base(
            'LayoutTests')
        self.destination_directory = self.filesystem.normpath(
            self.filesystem.join(self.layout_tests_dir, options.destination,
                                 self.filesystem.basename(self.top_of_repo)))
        # An import is "in place" when the directory being imported is
        # already the destination (re-importing over a previous import).
        self.import_in_place = (
            self.dir_to_import == self.destination_directory)
        self.dir_above_repo = self.filesystem.dirname(self.top_of_repo)

        # Sentinel until load_changeset() determines the real changeset.
        self.changeset = CHANGESET_NOT_AVAILABLE

        self.import_list = []

    def do_import(self):
        """Runs the full import: scan, record the changeset, then copy."""
        _log.info("Importing %s into %s", self.dir_to_import,
                  self.destination_directory)
        self.find_importable_tests(self.dir_to_import)
        self.load_changeset()
        self.import_tests()

    def load_changeset(self):
        """Returns the current changeset from mercurial or "Not Available"."""
        try:
            self.changeset = self.host.executive.run_command(
                ['hg', 'tip']).split('changeset:')[1]
        except (OSError, ScriptError):
            self.changeset = CHANGESET_NOT_AVAILABLE

    def find_importable_tests(self, directory):
        """Walks |directory| and populates self.import_list.

        Each entry records a source directory, the files to copy out of it,
        and per-kind test counts (reftests, jstests, total).
        """
        # FIXME: use filesystem
        paths_to_skip = self.find_paths_to_skip()

        for root, dirs, files in os.walk(directory):
            cur_dir = root.replace(self.dir_above_repo + '/', '') + '/'
            _log.info('  scanning ' + cur_dir + '...')
            total_tests = 0
            reftests = 0
            jstests = 0

            DIRS_TO_SKIP = ('.git', '.hg')
            if dirs:
                # Mutating |dirs| in place prunes os.walk's recursion.
                for d in DIRS_TO_SKIP:
                    if d in dirs:
                        dirs.remove(d)

                for path in paths_to_skip:
                    path_base = path.replace(self.options.destination + '/',
                                             '')
                    path_base = path_base.replace(cur_dir, '')
                    path_full = self.filesystem.join(root, path_base)
                    if path_base in dirs:
                        dirs.remove(path_base)
                        if not self.options.dry_run and self.import_in_place:
                            _log.info("  pruning %s" % path_base)
                            self.filesystem.rmtree(path_full)
                        else:
                            _log.info("  skipping %s" % path_base)

            copy_list = []

            for filename in files:
                path_full = self.filesystem.join(root, filename)
                path_base = path_full.replace(directory + '/', '')
                path_base = self.destination_directory.replace(
                    self.layout_tests_dir + '/', '') + '/' + path_base
                if path_base in paths_to_skip:
                    # Prune skipped files on non-dry-run in-place imports;
                    # either way the file is left out of the copy list.
                    if not self.options.dry_run and self.import_in_place:
                        _log.info("  pruning %s" % path_base)
                        self.filesystem.remove(path_full)
                    continue
                # FIXME: This block should really be a separate function, but the early-continues make that difficult.

                if filename.startswith('.') or filename.endswith('.pl'):
                    continue  # For some reason the w3c repo contains random perl scripts we don't care about.

                fullpath = os.path.join(root, filename)

                # Non-(X)HTML/XML files cannot contain test metadata; copy
                # them through unanalyzed.
                mimetype = mimetypes.guess_type(fullpath)
                if ('html' not in str(mimetype[0])
                        and 'application/xhtml+xml' not in str(mimetype[0])
                        and 'application/xml' not in str(mimetype[0])):
                    copy_list.append({'src': fullpath, 'dest': filename})
                    continue

                if root.endswith('resources'):
                    copy_list.append({'src': fullpath, 'dest': filename})
                    continue

                test_parser = TestParser(vars(self.options), filename=fullpath)
                test_info = test_parser.analyze_test()
                if test_info is None:
                    continue

                if 'reference' in test_info.keys():
                    reftests += 1
                    total_tests += 1
                    test_basename = os.path.basename(test_info['test'])

                    # Add the ref file, following WebKit style.
                    # FIXME: Ideally we'd support reading the metadata
                    # directly rather than relying  on a naming convention.
                    # Using a naming convention creates duplicate copies of the
                    # reference files.
                    ref_file = os.path.splitext(test_basename)[0] + '-expected'
                    ref_file += os.path.splitext(test_basename)[1]

                    copy_list.append({
                        'src':
                        test_info['reference'],
                        'dest':
                        ref_file,
                        'reference_support_info':
                        test_info['reference_support_info']
                    })
                    copy_list.append({
                        'src': test_info['test'],
                        'dest': filename
                    })

                elif 'jstest' in test_info.keys():
                    jstests += 1
                    total_tests += 1
                    copy_list.append({'src': fullpath, 'dest': filename})
                else:
                    total_tests += 1
                    copy_list.append({'src': fullpath, 'dest': filename})

            if copy_list:
                # Only add this directory to the list if there's something to import
                self.import_list.append({
                    'dirname': root,
                    'copy_list': copy_list,
                    'reftests': reftests,
                    'jstests': jstests,
                    'total_tests': total_tests
                })

    def find_paths_to_skip(self):
        """Returns the set of paths marked SKIP in W3CImportExpectations."""
        if self.options.ignore_expectations:
            return set()

        paths_to_skip = set()
        port = self.host.port_factory.get()
        w3c_import_expectations_path = self.webkit_finder.path_from_webkit_base(
            'LayoutTests', 'W3CImportExpectations')
        w3c_import_expectations = self.filesystem.read_text_file(
            w3c_import_expectations_path)
        parser = TestExpectationParser(port,
                                       full_test_list=(),
                                       is_lint_mode=False)
        expectation_lines = parser.parse(w3c_import_expectations_path,
                                         w3c_import_expectations)
        for line in expectation_lines:
            if 'SKIP' in line.expectations:
                # SKIP lines with specifiers are ambiguous; warn and ignore.
                if line.specifiers:
                    _log.warning(
                        "W3CImportExpectations:%s should not have any specifiers"
                        % line.line_numbers)
                    continue
                paths_to_skip.add(line.name)
        return paths_to_skip

    def import_tests(self):
        """Converts and copies every file recorded in self.import_list,
        then logs summary statistics."""
        total_imported_tests = 0
        total_imported_reftests = 0
        total_imported_jstests = 0
        total_prefixed_properties = {}

        for dir_to_copy in self.import_list:
            total_imported_tests += dir_to_copy['total_tests']
            total_imported_reftests += dir_to_copy['reftests']
            total_imported_jstests += dir_to_copy['jstests']

            prefixed_properties = []

            if not dir_to_copy['copy_list']:
                continue

            orig_path = dir_to_copy['dirname']

            subpath = os.path.relpath(orig_path, self.top_of_repo)
            new_path = os.path.join(self.destination_directory, subpath)

            if not os.path.exists(new_path):
                os.makedirs(new_path)

            copied_files = []

            for file_to_copy in dir_to_copy['copy_list']:
                # FIXME: Split this block into a separate function.
                orig_filepath = os.path.normpath(file_to_copy['src'])

                if os.path.isdir(orig_filepath):
                    # FIXME: Figure out what is triggering this and what to do about it.
                    _log.error('%s refers to a directory' % orig_filepath)
                    continue

                if not os.path.exists(orig_filepath):
                    _log.warning('%s not found. Possible error in the test.',
                                 orig_filepath)
                    continue

                new_filepath = os.path.join(new_path, file_to_copy['dest'])
                if 'reference_support_info' in file_to_copy.keys(
                ) and file_to_copy['reference_support_info'] != {}:
                    reference_support_info = file_to_copy[
                        'reference_support_info']
                else:
                    reference_support_info = None

                if not os.path.exists(os.path.dirname(new_filepath)):
                    if not self.import_in_place and not self.options.dry_run:
                        os.makedirs(os.path.dirname(new_filepath))

                relpath = os.path.relpath(new_filepath, self.layout_tests_dir)
                if not self.options.overwrite and os.path.exists(new_filepath):
                    _log.info('  skipping %s' % relpath)
                else:
                    # FIXME: Maybe doing a file diff is in order here for existing files?
                    # In other words, there's no sense in overwriting identical files, but
                    # there's no harm in copying the identical thing.
                    _log.info('  %s' % relpath)

                # Only html, xml, or css should be converted
                # FIXME: Eventually, so should js when support is added for this type of conversion
                mimetype = mimetypes.guess_type(orig_filepath)
                if 'html' in str(mimetype[0]) or 'xml' in str(
                        mimetype[0]) or 'css' in str(mimetype[0]):
                    converted_file = convert_for_webkit(
                        new_path,
                        filename=orig_filepath,
                        reference_support_info=reference_support_info)

                    if not converted_file:
                        if not self.import_in_place and not self.options.dry_run:
                            shutil.copyfile(
                                orig_filepath,
                                new_filepath)  # The file was unmodified.
                    else:
                        for prefixed_property in converted_file[0]:
                            total_prefixed_properties.setdefault(
                                prefixed_property, 0)
                            total_prefixed_properties[prefixed_property] += 1

                        prefixed_properties.extend(
                            set(converted_file[0]) - set(prefixed_properties))
                        if not self.options.dry_run:
                            # Context manager guarantees the handle is closed
                            # even if the write raises.
                            with open(new_filepath, 'wb') as outfile:
                                outfile.write(converted_file[1])
                else:
                    if not self.import_in_place and not self.options.dry_run:
                        shutil.copyfile(orig_filepath, new_filepath)

                copied_files.append(new_filepath.replace(
                    self._webkit_root, ''))

        _log.info('')
        _log.info('Import complete')
        _log.info('')
        _log.info('IMPORTED %d TOTAL TESTS', total_imported_tests)
        _log.info('Imported %d reftests', total_imported_reftests)
        _log.info('Imported %d JS tests', total_imported_jstests)
        _log.info(
            'Imported %d pixel/manual tests', total_imported_tests -
            total_imported_jstests - total_imported_reftests)
        _log.info('')

        if total_prefixed_properties:
            _log.info('Properties needing prefixes (by count):')
            for prefixed_property in sorted(
                    total_prefixed_properties,
                    key=lambda p: total_prefixed_properties[p]):
                _log.info('  %s: %s', prefixed_property,
                          total_prefixed_properties[prefixed_property])

    def setup_destination_directory(self):
        """ Creates a destination directory that mirrors that of the source directory """

        new_subpath = self.dir_to_import[len(self.top_of_repo):]

        destination_directory = os.path.join(self.destination_directory,
                                             new_subpath)

        if not os.path.exists(destination_directory):
            os.makedirs(destination_directory)

        _log.info('Tests will be imported into: %s', destination_directory)
Example #13
0
class TestImporter(object):
    def __init__(self, host, source_repo_path, options):
        """Compute source/destination paths for importing |source_repo_path|."""
        self.host = host
        self.source_repo_path = source_repo_path
        self.options = options
        self.filesystem = self.host.filesystem

        self.webkit_finder = WebKitFinder(self.filesystem)
        self._webkit_root = self.webkit_finder.webkit_base()
        self.layout_tests_dir = self.webkit_finder.path_from_webkit_base(
            'LayoutTests')
        repo_basename = self.filesystem.basename(self.source_repo_path)
        self.destination_directory = self.filesystem.normpath(
            self.filesystem.join(self.layout_tests_dir, options.destination,
                                 repo_basename))
        # Importing "in place" means the source already lives at the
        # destination (a re-import over a previous import).
        self.import_in_place = (
            self.source_repo_path == self.destination_directory)
        self.dir_above_repo = self.filesystem.dirname(self.source_repo_path)

        self.import_list = []

    def do_import(self):
        """Runs the import: scans the source repo, then copies the tests."""
        _log.info("Importing %s into %s", self.source_repo_path,
                  self.destination_directory)
        self.find_importable_tests()
        self.import_tests()

    def find_importable_tests(self):
        """Walks through the source directory to find what tests should be imported.

        This function sets self.import_list, which contains information about how many
        tests are being imported, and their source and destination paths.
        """
        paths_to_skip = self.find_paths_to_skip()

        for root, dirs, files in self.filesystem.walk(self.source_repo_path):
            cur_dir = root.replace(self.dir_above_repo + '/', '') + '/'
            _log.info('  scanning ' + cur_dir + '...')
            total_tests = 0
            reftests = 0
            jstests = 0

            # Files in 'tools' are not for browser testing, so we skip them.
            # See: http://testthewebforward.org/docs/test-format-guidelines.html#tools
            DIRS_TO_SKIP = ('.git', 'test-plan', 'tools')

            # We copy all files in 'support', including HTML without metadata.
            # See: http://testthewebforward.org/docs/test-format-guidelines.html#support-files
            DIRS_TO_INCLUDE = ('resources', 'support')

            if dirs:
                # Mutating |dirs| in place prunes the walk's recursion.
                for d in DIRS_TO_SKIP:
                    if d in dirs:
                        dirs.remove(d)

                for path in paths_to_skip:
                    path_base = path.replace(self.options.destination + '/',
                                             '')
                    path_base = path_base.replace(cur_dir, '')
                    path_full = self.filesystem.join(root, path_base)
                    if path_base in dirs:
                        dirs.remove(path_base)
                        if not self.options.dry_run and self.import_in_place:
                            _log.info("  pruning %s", path_base)
                            self.filesystem.rmtree(path_full)
                        else:
                            _log.info("  skipping %s", path_base)

            copy_list = []

            for filename in files:
                path_full = self.filesystem.join(root, filename)
                path_base = path_full.replace(self.source_repo_path + '/', '')
                path_base = self.destination_directory.replace(
                    self.layout_tests_dir + '/', '') + '/' + path_base
                if path_base in paths_to_skip:
                    # Prune skipped files on non-dry-run in-place imports;
                    # either way the file is left out of the copy list.
                    if not self.options.dry_run and self.import_in_place:
                        _log.info("  pruning %s", path_base)
                        self.filesystem.remove(path_full)
                    continue
                # FIXME: This block should really be a separate function, but the early-continues make that difficult.

                if filename.startswith('.') or filename.endswith('.pl'):
                    # The w3cs repos may contain perl scripts, which we don't care about.
                    continue
                if filename == 'OWNERS' or filename == 'reftest.list':
                    # These files fail our presubmits.
                    # See http://crbug.com/584660 and http://crbug.com/582838.
                    continue

                fullpath = self.filesystem.join(root, filename)

                # Non-(X)HTML/XML files cannot contain test metadata; copy
                # them through unanalyzed.
                mimetype = mimetypes.guess_type(fullpath)
                if ('html' not in str(mimetype[0])
                        and 'application/xhtml+xml' not in str(mimetype[0])
                        and 'application/xml' not in str(mimetype[0])):
                    copy_list.append({'src': fullpath, 'dest': filename})
                    continue

                if self.filesystem.basename(root) in DIRS_TO_INCLUDE:
                    copy_list.append({'src': fullpath, 'dest': filename})
                    continue

                test_parser = TestParser(fullpath, self.host,
                                         vars(self.options))
                test_info = test_parser.analyze_test()
                if test_info is None:
                    continue

                if self.path_too_long(path_full):
                    _log.warning(
                        '%s skipped due to long path. '
                        'Max length from repo base %d chars; see http://crbug.com/609871.',
                        path_full, MAX_PATH_LENGTH)
                    continue

                if 'reference' in test_info.keys():
                    test_basename = self.filesystem.basename(test_info['test'])
                    # Add the ref file, following WebKit style.
                    # FIXME: Ideally we'd support reading the metadata
                    # directly rather than relying  on a naming convention.
                    # Using a naming convention creates duplicate copies of the
                    # reference files.
                    ref_file = self.filesystem.splitext(
                        test_basename)[0] + '-expected'
                    # Make sure to use the extension from the *reference*, not
                    # from the test, because at least flexbox tests use XHTML
                    # references but HTML tests.
                    ref_file += self.filesystem.splitext(
                        test_info['reference'])[1]

                    if self.path_too_long(path_full.replace(
                            filename, ref_file)):
                        _log.warning(
                            '%s skipped because path of ref file %s would be too long. '
                            'Max length from repo base %d chars; see http://crbug.com/609871.',
                            path_full, ref_file, MAX_PATH_LENGTH)
                        continue

                    reftests += 1
                    total_tests += 1
                    copy_list.append({
                        'src':
                        test_info['reference'],
                        'dest':
                        ref_file,
                        'reference_support_info':
                        test_info['reference_support_info']
                    })
                    copy_list.append({
                        'src': test_info['test'],
                        'dest': filename
                    })

                elif 'jstest' in test_info.keys():
                    jstests += 1
                    total_tests += 1
                    copy_list.append({
                        'src': fullpath,
                        'dest': filename,
                        'is_jstest': True
                    })
                else:
                    total_tests += 1
                    copy_list.append({'src': fullpath, 'dest': filename})

            if copy_list:
                # Only add this directory to the list if there's something to import
                self.import_list.append({
                    'dirname': root,
                    'copy_list': copy_list,
                    'reftests': reftests,
                    'jstests': jstests,
                    'total_tests': total_tests
                })

    def find_paths_to_skip(self):
        """Returns the set of paths marked SKIP in W3CImportExpectations."""
        if self.options.ignore_expectations:
            return set()

        port = self.host.port_factory.get()
        expectations_path = self.webkit_finder.path_from_webkit_base(
            'LayoutTests', 'W3CImportExpectations')
        expectations_text = self.filesystem.read_text_file(expectations_path)
        parser = TestExpectationParser(port, all_tests=(), is_lint_mode=False)

        paths_to_skip = set()
        for line in parser.parse(expectations_path, expectations_text):
            if 'SKIP' not in line.expectations:
                continue
            # SKIP lines with specifiers are ambiguous; warn and ignore.
            if line.specifiers:
                _log.warning(
                    "W3CImportExpectations:%s should not have any specifiers",
                    line.line_numbers)
                continue
            paths_to_skip.add(line.name)
        return paths_to_skip

    def import_tests(self):
        """Reads |self.import_list|, and converts and copies files to their destination.

        For each directory recorded by the find step, copies its files under
        self.destination_directory, running the WebKit conversion on HTML/XML/CSS
        files, and logs summary counts (total, reftests, JS tests) plus any
        prefixed CSS properties encountered during conversion.
        """
        total_imported_tests = 0
        total_imported_reftests = 0
        total_imported_jstests = 0
        # Maps prefixed property name -> number of occurrences across all files.
        total_prefixed_properties = {}

        for dir_to_copy in self.import_list:
            total_imported_tests += dir_to_copy['total_tests']
            total_imported_reftests += dir_to_copy['reftests']
            total_imported_jstests += dir_to_copy['jstests']

            prefixed_properties = []

            if not dir_to_copy['copy_list']:
                continue

            orig_path = dir_to_copy['dirname']

            subpath = self.filesystem.relpath(orig_path, self.source_repo_path)
            new_path = self.filesystem.join(self.destination_directory,
                                            subpath)

            if not self.filesystem.exists(new_path):
                self.filesystem.maybe_make_directory(new_path)

            copied_files = []

            for file_to_copy in dir_to_copy['copy_list']:
                # FIXME: Split this block into a separate function.
                orig_filepath = self.filesystem.normpath(file_to_copy['src'])

                if self.filesystem.isdir(orig_filepath):
                    # FIXME: Figure out what is triggering this and what to do about it.
                    _log.error('%s refers to a directory', orig_filepath)
                    continue

                if not self.filesystem.exists(orig_filepath):
                    _log.warning('%s not found. Possible error in the test.',
                                 orig_filepath)
                    continue

                new_filepath = self.filesystem.join(new_path,
                                                    file_to_copy['dest'])
                if 'reference_support_info' in file_to_copy.keys(
                ) and file_to_copy['reference_support_info'] != {}:
                    reference_support_info = file_to_copy[
                        'reference_support_info']
                else:
                    reference_support_info = None

                if not self.filesystem.exists(
                        self.filesystem.dirname(new_filepath)):
                    if not self.import_in_place and not self.options.dry_run:
                        self.filesystem.maybe_make_directory(
                            self.filesystem.dirname(new_filepath))

                relpath = self.filesystem.relpath(new_filepath,
                                                  self.layout_tests_dir)
                if not self.options.overwrite and self.filesystem.exists(
                        new_filepath):
                    _log.info('  skipping %s', relpath)
                else:
                    # FIXME: Maybe doing a file diff is in order here for existing files?
                    # In other words, there's no sense in overwriting identical files, but
                    # there's no harm in copying the identical thing.
                    _log.info('  %s', relpath)

                # Only HTML, XML, or CSS should be converted.
                # FIXME: Eventually, so should JS when support is added for this type of conversion.
                mimetype = mimetypes.guess_type(orig_filepath)
                if 'is_jstest' not in file_to_copy and (
                        'html' in str(mimetype[0]) or 'xml' in str(mimetype[0])
                        or 'css' in str(mimetype[0])):
                    converted_file = convert_for_webkit(
                        new_path,
                        filename=orig_filepath,
                        reference_support_info=reference_support_info)

                    if not converted_file:
                        if not self.import_in_place and not self.options.dry_run:
                            self.filesystem.copyfile(
                                orig_filepath,
                                new_filepath)  # The file was unmodified.
                    else:
                        for prefixed_property in converted_file[0]:
                            total_prefixed_properties.setdefault(
                                prefixed_property, 0)
                            total_prefixed_properties[prefixed_property] += 1

                        prefixed_properties.extend(
                            set(converted_file[0]) - set(prefixed_properties))
                        if not self.options.dry_run:
                            # Use a context manager so the handle is closed
                            # even if the write raises.
                            with open(new_filepath, 'wb') as outfile:
                                outfile.write(
                                    converted_file[1].encode('utf-8'))
                else:
                    if not self.import_in_place and not self.options.dry_run:
                        self.filesystem.copyfile(orig_filepath, new_filepath)
                        # NOTE(review): assumes Python 2 str semantics; under
                        # Python 3, bytes[:2] == '#!' would never match and
                        # scripts would lose their executable bit — confirm.
                        if self.filesystem.read_binary_file(
                                orig_filepath)[:2] == '#!':
                            self.filesystem.make_executable(new_filepath)

                copied_files.append(new_filepath.replace(
                    self._webkit_root, ''))

        _log.info('')
        _log.info('Import complete')
        _log.info('')
        _log.info('IMPORTED %d TOTAL TESTS', total_imported_tests)
        _log.info('Imported %d reftests', total_imported_reftests)
        _log.info('Imported %d JS tests', total_imported_jstests)
        _log.info(
            'Imported %d pixel/manual tests', total_imported_tests -
            total_imported_jstests - total_imported_reftests)
        _log.info('')

        if total_prefixed_properties:
            _log.info('Properties needing prefixes (by count):')
            for prefixed_property in sorted(
                    total_prefixed_properties,
                    key=lambda p: total_prefixed_properties[p]):
                _log.info('  %s: %s', prefixed_property,
                          total_prefixed_properties[prefixed_property])

    def path_too_long(self, source_path):
        """Checks whether a source path is too long to import.

        Args:
            source_path: Absolute path of the file to be imported.

        Returns:
            True if the path is too long to import, False if it's OK.
        """
        relative_path = os.path.relpath(source_path, self.source_repo_path)
        return len(relative_path) > MAX_PATH_LENGTH
Exemple #14
0
class DepsUpdater(object):
    """Updates the imported wpt/csswg-test copies under LayoutTests/imported.

    Clones the upstream repository, re-runs the import-w3c-tests script,
    deletes manual tests and orphaned baselines, commits the result, and can
    optionally upload a CL and send it through the commit queue.
    """

    def __init__(self, host):
        self.host = host
        self.executive = host.executive
        self.fs = host.filesystem
        self.finder = WebKitFinder(self.fs)
        # Set from parsed command-line options in main().
        self.verbose = False
        # Initialized in main() once the auth options are known.
        self.git_cl = None

    def main(self, argv=None):
        """Runs the full update flow; returns 0 on success, 1 on failure."""
        options = self.parse_args(argv)
        self.verbose = options.verbose

        if not self.checkout_is_okay(options.allow_local_commits):
            return 1

        self.git_cl = GitCL(
            self.host, auth_refresh_token_json=options.auth_refresh_token_json)

        self.print_('## Noting the current Chromium commit.')
        _, show_ref_output = self.run(['git', 'show-ref', 'HEAD'])
        chromium_commitish = show_ref_output.split()[0]

        if options.target == 'wpt':
            import_commitish = self.update(WPT_DEST_NAME, WPT_REPO_URL,
                                           options.keep_w3c_repos_around,
                                           options.revision)
            self._copy_resources()
        elif options.target == 'css':
            import_commitish = self.update(CSS_DEST_NAME, CSS_REPO_URL,
                                           options.keep_w3c_repos_around,
                                           options.revision)
        else:
            raise AssertionError("Unsupported target %s" % options.target)

        has_changes = self.commit_changes_if_needed(chromium_commitish,
                                                    import_commitish)
        if options.auto_update and has_changes:
            commit_successful = self.do_auto_update()
            if not commit_successful:
                return 1
        return 0

    def parse_args(self, argv):
        """Parses command-line arguments; returns an argparse Namespace."""
        parser = argparse.ArgumentParser()
        parser.description = __doc__
        parser.add_argument('-v',
                            '--verbose',
                            action='store_true',
                            help='log what we are doing')
        parser.add_argument(
            '--allow-local-commits',
            action='store_true',
            help='allow script to run even if we have local commits')
        parser.add_argument(
            '--keep-w3c-repos-around',
            action='store_true',
            help='leave the w3c repos around that were imported previously.')
        parser.add_argument('-r',
                            dest='revision',
                            action='store',
                            help='Target revision.')
        parser.add_argument(
            'target',
            choices=['css', 'wpt'],
            help=
            'Target repository.  "css" for csswg-test, "wpt" for web-platform-tests.'
        )
        parser.add_argument('--auto-update',
                            action='store_true',
                            help='uploads CL and initiates commit queue.')
        parser.add_argument('--auth-refresh-token-json',
                            help='Rietveld auth refresh JSON token.')
        return parser.parse_args(argv)

    def checkout_is_okay(self, allow_local_commits):
        """Returns True if the git checkout is clean enough to run the update."""
        git_diff_retcode, _ = self.run(['git', 'diff', '--quiet', 'HEAD'],
                                       exit_on_failure=False)
        if git_diff_retcode:
            self.print_('## Checkout is dirty; aborting.')
            return False

        local_commits = self.run(
            ['git', 'log', '--oneline', 'origin/master..HEAD'])[1]
        if local_commits and not allow_local_commits:
            self.print_(
                '## Checkout has local commits; aborting. Use --allow-local-commits to allow this.'
            )
            return False

        # A leftover temp clone from a previous run would confuse the import.
        if self.fs.exists(self.path_from_webkit_base(WPT_DEST_NAME)):
            self.print_('## WebKit/%s exists; aborting.' % WPT_DEST_NAME)
            return False

        if self.fs.exists(self.path_from_webkit_base(CSS_DEST_NAME)):
            self.print_('## WebKit/%s repo exists; aborting.' % CSS_DEST_NAME)
            return False

        return True

    def _copy_resources(self):
        """Copies resources from LayoutTests/resources to wpt and vice versa.

        There are resources from our repository that we use instead of the
        upstream versions. Conversely, there are also some resources that
        are copied in the other direction.

        Specifically:
          - testharnessreport.js contains code needed to integrate our testing
            with testharness.js; we also want our code to be used for tests
            in wpt.
          - TODO(qyearsley, jsbell): Document why other files are copied,
            or stop copying them if it's unnecessary.

        If this method is changed, the lists of files expected to be identical
        in LayoutTests/PRESUBMIT.py should also be changed.
        """
        resources_to_copy_to_wpt = [
            ('testharnessreport.js', 'resources'),
            ('WebIDLParser.js', 'resources'),
            ('vendor-prefix.js', 'common'),
        ]
        resources_to_copy_from_wpt = [
            ('idlharness.js', 'resources'),
            ('testharness.js', 'resources'),
        ]
        for filename, wpt_subdir in resources_to_copy_to_wpt:
            source = self.path_from_webkit_base('LayoutTests', 'resources',
                                                filename)
            destination = self.path_from_webkit_base('LayoutTests', 'imported',
                                                     WPT_DEST_NAME, wpt_subdir,
                                                     filename)
            self.copyfile(source, destination)
            self.run(['git', 'add', destination])
        for filename, wpt_subdir in resources_to_copy_from_wpt:
            source = self.path_from_webkit_base('LayoutTests', 'imported',
                                                WPT_DEST_NAME, wpt_subdir,
                                                filename)
            destination = self.path_from_webkit_base('LayoutTests',
                                                     'resources', filename)
            self.copyfile(source, destination)
            self.run(['git', 'add', destination])

    def update(self, dest_dir_name, url, keep_w3c_repos_around, revision):
        """Updates an imported repository.

        Args:
            dest_dir_name: The destination directory name.
            url: URL of the git repository.
            keep_w3c_repos_around: If True, the temporary clone is not deleted.
            revision: Commit hash or None.

        Returns:
            A string for the commit description "<destination>@<commitish>".
        """
        temp_repo_path = self.path_from_webkit_base(dest_dir_name)
        self.print_('## Cloning %s into %s.' % (url, temp_repo_path))
        self.run(['git', 'clone', url, temp_repo_path])

        if revision is not None:
            self.print_('## Checking out %s' % revision)
            self.run(['git', 'checkout', revision], cwd=temp_repo_path)
        self.run(['git', 'submodule', 'update', '--init', '--recursive'],
                 cwd=temp_repo_path)

        self.print_('## Noting the revision we are importing.')
        _, show_ref_output = self.run(['git', 'show-ref', 'origin/master'],
                                      cwd=temp_repo_path)
        master_commitish = show_ref_output.split()[0]

        self.print_('## Cleaning out tests from LayoutTests/imported/%s.' %
                    dest_dir_name)
        dest_path = self.path_from_webkit_base('LayoutTests', 'imported',
                                               dest_dir_name)
        files_to_delete = self.fs.files_under(dest_path,
                                              file_filter=self.is_not_baseline)
        for subpath in files_to_delete:
            self.remove('LayoutTests', 'imported', subpath)

        self.print_('## Importing the tests.')
        src_repo = self.path_from_webkit_base(dest_dir_name)
        import_path = self.path_from_webkit_base('Tools', 'Scripts',
                                                 'import-w3c-tests')
        self.run(
            [self.host.executable, import_path, '-d', 'imported', src_repo])

        self.run(
            ['git', 'add', '--all',
             'LayoutTests/imported/%s' % dest_dir_name])

        self.print_('## Deleting manual tests.')
        files_to_delete = self.fs.files_under(dest_path,
                                              file_filter=self.is_manual_test)
        for subpath in files_to_delete:
            self.remove('LayoutTests', 'imported', subpath)

        self.print_('## Deleting any orphaned baselines.')
        previous_baselines = self.fs.files_under(dest_path,
                                                 file_filter=self.is_baseline)
        for subpath in previous_baselines:
            full_path = self.fs.join(dest_path, subpath)
            # If the glob matches only the baseline itself, no test with that
            # name remains and the baseline is orphaned.
            if self.fs.glob(full_path.replace('-expected.txt',
                                              '*')) == [full_path]:
                self.fs.remove(full_path)

        if not keep_w3c_repos_around:
            self.print_('## Deleting temp repo directory %s.' % temp_repo_path)
            self.rmtree(temp_repo_path)

        self.print_(
            '## Updating TestExpectations for any removed or renamed tests.')
        self.update_test_expectations(self._list_deleted_tests(),
                                      self._list_renamed_tests())

        return '%s@%s' % (dest_dir_name, master_commitish)

    def commit_changes_if_needed(self, chromium_commitish, import_commitish):
        """Commits the imported changes if any; returns True if a commit was made."""
        if self.run(['git', 'diff', '--quiet', 'HEAD'],
                    exit_on_failure=False)[0]:
            self.print_('## Committing changes.')
            commit_msg = ('Import %s\n'
                          '\n'
                          'Using update-w3c-deps in Chromium %s.\n' %
                          (import_commitish, chromium_commitish))
            path_to_commit_msg = self.path_from_webkit_base('commit_msg')
            if self.verbose:
                self.print_('cat > %s <<EOF' % path_to_commit_msg)
                self.print_(commit_msg)
                self.print_('EOF')
            self.fs.write_text_file(path_to_commit_msg, commit_msg)
            self.run(['git', 'commit', '-a', '-F', path_to_commit_msg])
            self.remove(path_to_commit_msg)
            self.print_('## Done: changes imported and committed.')
            return True
        else:
            self.print_('## Done: no changes to import.')
            return False

    def is_manual_test(self, fs, dirname, basename):
        """Returns True if the file should be removed because it's a manual test.

        Tests with "-manual" in the name are not considered manual tests
        if there is a corresponding JS automation file.
        """
        basename_without_extension, _ = self.fs.splitext(basename)
        if not basename_without_extension.endswith('-manual'):
            return False
        dir_from_wpt = fs.relpath(
            dirname,
            self.path_from_webkit_base('LayoutTests', 'imported', 'wpt'))
        automation_dir = self.path_from_webkit_base('LayoutTests', 'imported',
                                                    'wpt_automation',
                                                    dir_from_wpt)
        if fs.isfile(
                fs.join(automation_dir,
                        '%s-automation.js' % basename_without_extension)):
            return False
        return True

    # Callback for FileSystem.files_under; not all arguments used - pylint: disable=unused-argument
    def is_baseline(self, fs, dirname, basename):
        """Returns True if the file name looks like a -expected.txt baseline."""
        return basename.endswith('-expected.txt')

    def is_not_baseline(self, fs, dirname, basename):
        """Inverse of is_baseline, for FileSystem.files_under filters."""
        return not self.is_baseline(fs, dirname, basename)

    def run(self, cmd, exit_on_failure=True, cwd=None):
        """Runs |cmd|, returning (return_code, stdout).

        By default exits the host process if the command fails; pass
        exit_on_failure=False to get the non-zero return code instead.
        """
        if self.verbose:
            self.print_(' '.join(cmd))

        cwd = cwd or self.finder.webkit_base()
        proc = self.executive.popen(cmd,
                                    stdout=self.executive.PIPE,
                                    stderr=self.executive.PIPE,
                                    cwd=cwd)
        out, err = proc.communicate()
        if proc.returncode or self.verbose:
            self.print_('# ret> %d' % proc.returncode)
            if out:
                for line in out.splitlines():
                    self.print_('# out> %s' % line)
            if err:
                for line in err.splitlines():
                    self.print_('# err> %s' % line)
        if exit_on_failure and proc.returncode:
            self.host.exit(proc.returncode)
        return proc.returncode, out

    def check_run(self, command):
        """Runs |command| and returns its output; raises Exception on failure."""
        return_code, out = self.run(command)
        if return_code:
            # The format arguments must be a tuple; '%' binds tighter than
            # ',', so the previous form raised TypeError instead of the
            # intended message.
            raise Exception('%s failed with exit code %d.' %
                            (' '.join(command), return_code))
        return out

    def copyfile(self, source, destination):
        """Copies a file, logging the operation in verbose mode."""
        if self.verbose:
            self.print_('cp %s %s' % (source, destination))
        self.fs.copyfile(source, destination)

    def remove(self, *comps):
        """Removes the file at the given path components under the WebKit base."""
        dest = self.path_from_webkit_base(*comps)
        if self.verbose:
            self.print_('rm %s' % dest)
        self.fs.remove(dest)

    def rmtree(self, *comps):
        """Recursively removes the directory under the WebKit base."""
        dest = self.path_from_webkit_base(*comps)
        if self.verbose:
            self.print_('rm -fr %s' % dest)
        self.fs.rmtree(dest)

    def path_from_webkit_base(self, *comps):
        """Returns an absolute path built from components under the WebKit base."""
        return self.finder.path_from_webkit_base(*comps)

    def print_(self, msg):
        """Prints via the host, so output is capturable in tests."""
        self.host.print_(msg)

    def do_auto_update(self):
        """Attempts to upload a CL, make any required adjustments, and commit.

        This function assumes that the imported repo has already been updated,
        and that change has been committed. There may be newly-failing tests,
        so before being able to commit these new changes, we may need to update
        TestExpectations or download new baselines.

        Returns:
            True if successfully committed, False otherwise.
        """
        self._upload_cl()
        self.print_('## ' + self.git_cl.run(['issue']).strip())

        # First try: if there are failures, update expectations.
        self.print_('## Triggering try jobs.')
        for try_bot in self.host.builders.all_try_builder_names():
            self.git_cl.run(['try', '-b', try_bot])
        try_results = self.git_cl.wait_for_try_jobs()
        if not try_results:
            self.print_('## Timed out waiting for try results.')
            # Return False (not None) to match the documented contract.
            return False
        if try_results and self.git_cl.has_failing_try_results(try_results):
            self.fetch_new_expectations_and_baselines()

        # Second try: if there are failures, then abort.
        self.git_cl.run(['set-commit', '--rietveld'])
        try_results = self.git_cl.wait_for_try_jobs()
        if not try_results:
            self.print_('Timed out waiting for try results.')
            self.git_cl.run(['set-close'])
            return False
        if self.git_cl.has_failing_try_results(try_results):
            self.print_('CQ failed; aborting.')
            self.git_cl.run(['set-close'])
            return False
        self.print_('## Update completed.')
        return True

    def _upload_cl(self):
        """Uploads the current change list, CC'ing directory owners."""
        self.print_('## Uploading change list.')
        cc_list = self.get_directory_owners_to_cc()
        last_commit_message = self.check_run(
            ['git', 'log', '-1', '--format=%B'])
        commit_message = last_commit_message + '[email protected]'
        self.git_cl.run([
            'upload',
            '-f',
            '--rietveld',
            '-m',
            commit_message,
        ] + ['--cc=' + email for email in cc_list])

    def get_directory_owners_to_cc(self):
        """Returns a list of email addresses to CC for the current import."""
        self.print_('## Gathering directory owners emails to CC.')
        directory_owners_file_path = self.finder.path_from_webkit_base(
            'Tools', 'Scripts', 'webkitpy', 'w3c', 'directory_owners.json')
        with open(directory_owners_file_path) as data_file:
            directory_to_owner = self.parse_directory_owners(
                json.load(data_file))
        out = self.check_run(['git', 'diff', 'origin/master', '--name-only'])
        changed_files = out.splitlines()
        return self.generate_email_list(changed_files, directory_to_owner)

    @staticmethod
    def parse_directory_owners(decoded_data_file):
        """Builds a directory -> notification-email dict, skipping empty emails."""
        directory_dict = {}
        for dict_set in decoded_data_file:
            if dict_set['notification-email']:
                directory_dict[
                    dict_set['directory']] = dict_set['notification-email']
        return directory_dict

    def generate_email_list(self, changed_files, directory_to_owner):
        """Returns a list of email addresses based on the given file list and
        directory-to-owner mapping.

        Args:
            changed_files: A list of file paths relative to the repository root.
            directory_to_owner: A dict mapping layout test directories to emails.

        Returns:
            A list of the email addresses to be notified for the current import.
        """
        email_addresses = set()
        for file_path in changed_files:
            test_path = self.finder.layout_test_name(file_path)
            if test_path is None:
                continue
            test_dir = self.fs.dirname(test_path)
            if test_dir in directory_to_owner:
                email_addresses.add(directory_to_owner[test_dir])
        return sorted(email_addresses)

    def fetch_new_expectations_and_baselines(self):
        """Adds new expectations and downloads baselines based on try job results, then commits and uploads the change."""
        self.print_(
            '## Adding test expectations lines to LayoutTests/TestExpectations.'
        )
        script_path = self.path_from_webkit_base(
            'Tools', 'Scripts', 'update-w3c-test-expectations')
        self.run([self.host.executable, script_path, '--verbose'])
        message = 'Modify TestExpectations or download new baselines for tests.'
        self.check_run(['git', 'commit', '-a', '-m', message])
        self.git_cl.run(['upload', '-m', message, '--rietveld'])

    def update_test_expectations(self, deleted_tests, renamed_tests):
        """Updates the TestExpectations file entries for tests that have been deleted or renamed."""
        port = self.host.port_factory.get()
        test_expectations = TestExpectations(port, include_overrides=False)
        # Tests for which files don't exist aren't stored in TestExpectationsModel,
        # so methods like TestExpectations.remove_expectation_line don't work; instead
        # we can run through the TestExpectationLine objects that were parsed.
        # FIXME: This won't work for removed or renamed directories with test expectations
        # that are directories rather than individual tests.
        new_lines = []
        changed_lines = []
        for expectation_line in test_expectations.expectations():
            if expectation_line.name in deleted_tests:
                continue
            if expectation_line.name in renamed_tests:
                expectation_line.name = renamed_tests[expectation_line.name]
                # Upon parsing the file, a "path does not exist" warning is expected
                # to be there for tests that have been renamed, and if there are warnings,
                # then the original string is used. If the warnings are reset, then the
                # expectation line is re-serialized when output.
                expectation_line.warnings = []
                changed_lines.append(expectation_line)
            new_lines.append(expectation_line)
        self.host.filesystem.write_text_file(
            port.path_to_generic_test_expectations_file(),
            TestExpectations.list_to_string(
                new_lines, reconstitute_only_these=changed_lines))

    def _list_deleted_tests(self):
        """Returns a list of layout tests that have been deleted."""
        out = self.check_run(
            ['git', 'diff', 'origin/master', '--diff-filter=D', '--name-only'])
        deleted_tests = []
        for line in out.splitlines():
            test = self.finder.layout_test_name(line)
            if test:
                deleted_tests.append(test)
        return deleted_tests

    def _list_renamed_tests(self):
        """Returns a dict mapping source to dest name for layout tests that have been renamed."""
        out = self.check_run([
            'git', 'diff', 'origin/master', '--diff-filter=R', '--name-status'
        ])
        renamed_tests = {}
        for line in out.splitlines():
            _, source_path, dest_path = line.split()
            source_test = self.finder.layout_test_name(source_path)
            dest_test = self.finder.layout_test_name(dest_path)
            if source_test and dest_test:
                renamed_tests[source_test] = dest_test
        return renamed_tests
Exemple #15
0
class W3CExpectationsLineAdder(object):
    """Adds TestExpectations lines for imported W3C tests that fail on try bots.

    Collects failing layout test results from the latest try jobs on the
    current CL, merges the per-platform failures, triggers rebaselining for
    testharness.js tests that only need new -expected.txt baselines, and
    appends expectation lines for the remaining failures to the generic
    TestExpectations file.
    """

    def __init__(self, host):
        # host is expected to provide filesystem, web, executive, buildbot,
        # builders and port_factory attributes (a webkitpy Host-like object).
        self.host = host
        self.host.initialize_scm()
        self.finder = WebKitFinder(self.host.filesystem)

    def run(self, args=None):
        """Top-level entry point; returns a process exit code (0 or 1)."""
        parser = argparse.ArgumentParser(description=__doc__)
        parser.add_argument('-v', '--verbose', action='store_true', help='More verbose logging.')
        args = parser.parse_args(args)
        log_level = logging.DEBUG if args.verbose else logging.INFO
        logging.basicConfig(level=log_level, format='%(message)s')

        issue_number = self.get_issue_number()
        # GitCL reports a missing issue as the string 'None', not the object.
        if issue_number == 'None':
            _log.error('No issue on current branch.')
            return 1

        rietveld = Rietveld(self.host.web)
        builds = rietveld.latest_try_jobs(issue_number, self.get_try_bots())
        _log.debug('Latest try jobs: %r', builds)

        if not builds:
            _log.error('No try job information was collected.')
            return 1

        # Accumulate failing results from every try builder into a single
        # test -> platform -> result mapping.
        test_expectations = {}
        for build in builds:
            platform_results = self.get_failing_results_dict(build)
            test_expectations = self.merge_dicts(test_expectations, platform_results)

        # Collapse platforms whose results are identical into one tuple key.
        # NOTE: iteritems() makes this Python 2 only.
        for test_name, platform_result in test_expectations.iteritems():
            test_expectations[test_name] = self.merge_same_valued_keys(platform_result)

        # Rebaseline what can be rebaselined; write expectation lines for
        # whatever failures remain.
        test_expectations = self.get_expected_txt_files(test_expectations)
        test_expectation_lines = self.create_line_list(test_expectations)
        self.write_to_test_expectations(test_expectation_lines)
        return 0

    def get_issue_number(self):
        """Returns the code review issue number for the current branch (a string)."""
        return GitCL(self.host).get_issue_number()

    def get_try_bots(self):
        """Returns the names of all try builders known to the host."""
        return self.host.builders.all_try_builder_names()

    def generate_results_dict(self, platform, result_list):
        """Builds a test -> platform -> result dict from a list of results.

        Args:
            platform: A port/platform name, possibly prefixed (e.g. 'win-win7').
            result_list: Result objects exposing test_name(),
                expected_results() and actual_results() accessors.
        """
        test_dict = {}
        if '-' in platform:
            # Strip everything up to the first '-' and capitalize the rest,
            # e.g. 'win-win7' -> 'Win7'. Assumes the suffix alone names the
            # platform version — TODO confirm against the outputs of
            # port_name_for_builder_name.
            platform = platform[platform.find('-') + 1:].capitalize()
        for result in result_list:
            test_dict[result.test_name()] = {
                platform: {
                    'expected': result.expected_results(),
                    'actual': result.actual_results(),
                    # All generated lines share this tracking bug.
                    'bug': 'crbug.com/626703'
                }}
        return test_dict

    def get_failing_results_dict(self, build):
        """Returns a nested dict of failing test results.

        Retrieves a full list of layout test results from a builder result URL.
        Collects the builder name, platform and a list of tests that did not
        run as expected.

        Args:
            build: A Build object.

        Returns:
            A dictionary with the structure: {
                'key': {
                    'expected': 'TIMEOUT',
                    'actual': 'CRASH',
                    'bug': 'crbug.com/11111'
                }
            }
            If there are no failing results or no results could be fetched,
            this will return an empty dict.
        """
        layout_test_results = self.host.buildbot.fetch_results(build)
        if layout_test_results is None:
            _log.warning('No results for build %s', build)
            return {}
        platform = self.host.builders.port_name_for_builder_name(build.builder_name)
        result_list = layout_test_results.didnt_run_as_expected_results()
        failing_results_dict = self.generate_results_dict(platform, result_list)
        return failing_results_dict

    def merge_dicts(self, target, source, path=None):
        """Recursively merges nested dictionaries.

        Args:
            target: First dictionary, which is updated based on source.
            source: Second dictionary, not modified.

        Returns:
            An updated target dictionary.

        Raises:
            ValueError: If both dicts hold the same key with differing
                non-dict values (a merge conflict).
        """
        path = path or []
        for key in source:
            if key in target:
                if (isinstance(target[key], dict)) and isinstance(source[key], dict):
                    self.merge_dicts(target[key], source[key], path + [str(key)])
                elif target[key] == source[key]:
                    # Identical leaf values: nothing to do.
                    pass
                else:
                    # Same key, different non-dict values: conflict.
                    raise ValueError('The key: %s already exist in the target dictionary.' % '.'.join(path))
            else:
                target[key] = source[key]
        return target

    def merge_same_valued_keys(self, dictionary):
        """Merges keys in dictionary with same value.

        Traverses through a dict and compares the values of keys to one another.
        If the values match, the keys are combined to a tuple and the previous
        keys are removed from the dict.

        Args:
            dictionary: A dictionary with a dictionary as the value.

        Returns:
            A new dictionary with updated keys to reflect matching values of keys.
            Example: {
                'one': {'foo': 'bar'},
                'two': {'foo': 'bar'},
                'three': {'foo': 'bar'}
            }
            is converted to a new dictionary with that contains
            {('one', 'two', 'three'): {'foo': 'bar'}}
        """
        merged_dict = {}
        matching_value_keys = set()
        keys = sorted(dictionary.keys())
        while keys:
            current_key = keys[0]
            found_match = False
            if current_key == keys[-1]:
                # Only one key remains; nothing left to compare against.
                merged_dict[current_key] = dictionary[current_key]
                keys.remove(current_key)
                break

            for next_item in keys[1:]:
                if dictionary[current_key] == dictionary[next_item]:
                    found_match = True
                    matching_value_keys.update([current_key, next_item])

                if next_item == keys[-1]:
                    # End of the scan for current_key: either emit one merged
                    # tuple key, or keep current_key on its own.
                    if found_match:
                        merged_dict[tuple(matching_value_keys)] = dictionary[current_key]
                        keys = [k for k in keys if k not in matching_value_keys]
                    else:
                        merged_dict[current_key] = dictionary[current_key]
                        keys.remove(current_key)
            matching_value_keys = set()
        return merged_dict

    def get_expectations(self, results):
        """Returns a set of test expectations for a given test dict.

        Returns a set of one or more test expectations based on the expected
        and actual results of a given test name.

        Args:
            results: A dictionary that maps one test to its results. Example:
                {
                    'test_name': {
                        'expected': 'PASS',
                        'actual': 'FAIL',
                        'bug': 'crbug.com/11111'
                    }
                }

        Returns:
            A set of one or more test expectation strings with the first letter
            capitalized. Example: set(['Failure', 'Timeout']).
        """
        expectations = set()
        failure_types = ['TEXT', 'FAIL', 'IMAGE+TEXT', 'IMAGE', 'AUDIO', 'MISSING', 'LEAK']
        test_expectation_types = ['SLOW', 'TIMEOUT', 'CRASH', 'PASS', 'REBASELINE', 'NEEDSREBASELINE', 'NEEDSMANUALREBASELINE']
        # NOTE(review): a pair where both expected and actual are failure
        # types adds nothing to the set; presumably such tests keep their
        # existing expectation — verify this is intended.
        for expected in results['expected'].split():
            for actual in results['actual'].split():
                if expected in test_expectation_types and actual in failure_types:
                    expectations.add('Failure')
                if expected in failure_types and actual in test_expectation_types:
                    expectations.add(actual.capitalize())
                if expected in test_expectation_types and actual in test_expectation_types:
                    expectations.add(actual.capitalize())
        return expectations

    def create_line_list(self, merged_results):
        """Creates list of test expectations lines.

        Traverses through the given |merged_results| dictionary and parses the
        value to create one test expectations line per key.

        Args:
            merged_results: A merged_results with the format:
                {
                    'test_name': {
                        'platform': {
                            'expected': 'PASS',
                            'actual': 'FAIL',
                            'bug': 'crbug.com/11111'
                        }
                    }
                }

        Returns:
            A list of test expectations lines with the format:
            ['BUG_URL [PLATFORM(S)] TEST_NAME [EXPECTATION(S)]']
        """
        line_list = []
        for test_name, platform_results in merged_results.iteritems():
            for platform in platform_results:
                # Only write lines for imported (W3C) tests.
                if test_name.startswith('imported'):
                    platform_list = []
                    bug = []
                    expectations = []
                    # Platform may be a tuple of merged platforms (see
                    # merge_same_valued_keys) or a single platform string.
                    if isinstance(platform, tuple):
                        platform_list = list(platform)
                    else:
                        platform_list.append(platform)
                    bug.append(platform_results[platform]['bug'])
                    expectations = self.get_expectations(platform_results[platform])
                    line = '%s [ %s ] %s [ %s ]' % (bug[0], ' '.join(platform_list), test_name, ' '.join(expectations))
                    line_list.append(str(line))
        return line_list

    def write_to_test_expectations(self, line_list):
        """Writes to TestExpectations.

        The place in the file where the new lines are inserted is after a
        marker comment line. If this marker comment line is not found, it will
        be added to the end of the file.

        Args:
            line_list: A list of lines to add to the TestExpectations file.
        """
        _log.debug('Lines to write to TestExpectations: %r', line_list)
        port = self.host.port_factory.get()
        expectations_file_path = port.path_to_generic_test_expectations_file()
        file_contents = self.host.filesystem.read_text_file(expectations_file_path)
        marker_comment_index = file_contents.find(MARKER_COMMENT)
        # Skip lines whose test name already appears anywhere in the file.
        line_list = [line for line in line_list if self._test_name_from_expectation_string(line) not in file_contents]
        if not line_list:
            return
        if marker_comment_index == -1:
            file_contents += '\n%s\n' % MARKER_COMMENT
            file_contents += '\n'.join(line_list)
        else:
            end_of_marker_line = (file_contents[marker_comment_index:].find('\n')) + marker_comment_index
            # The newline at end_of_marker_line is kept on both sides of the
            # splice, so the new lines land between the marker line and the
            # rest of the file's original content.
            file_contents = file_contents[:end_of_marker_line + 1] + '\n'.join(line_list) + file_contents[end_of_marker_line:]
        self.host.filesystem.write_text_file(expectations_file_path, file_contents)

    @staticmethod
    def _test_name_from_expectation_string(expectation_string):
        """Parses the test name out of a single expectation line string."""
        return TestExpectationLine.tokenize_line(filename='', expectation_string=expectation_string, line_number=0).name

    def get_expected_txt_files(self, tests_results):
        """Fetches new baseline files for tests that should be rebaselined.

        Invokes webkit-patch rebaseline-cl in order to download new
        -expected.txt files for testharness.js tests that did not crash or time
        out. Then, the platform-specific test is removed from the overall
        failure test dictionary.

        Args:
            tests_results: A dict mapping test name to platform to test results.

        Returns:
            An updated tests_results dictionary without the platform-specific
            testharness.js tests that required new baselines to be downloaded
            from `webkit-patch rebaseline-cl`.
        """
        modified_tests = self.get_modified_existing_tests()
        tests_to_rebaseline, tests_results = self.get_tests_to_rebaseline(modified_tests, tests_results)
        _log.debug('Tests to rebaseline: %r', tests_to_rebaseline)
        if tests_to_rebaseline:
            # NOTE(review): joining chromium_base() with the (absolute)
            # webkit_base() relies on join() discarding the earlier absolute
            # component — confirm against the filesystem abstraction.
            webkit_patch = self.host.filesystem.join(
                self.finder.chromium_base(), self.finder.webkit_base(), self.finder.path_to_script('webkit-patch'))
            self.host.executive.run_command([
                'python',
                webkit_patch,
                'rebaseline-cl',
                '--verbose',
                '--no-trigger-jobs',
            ] + tests_to_rebaseline)
        return tests_results

    def get_modified_existing_tests(self):
        """Returns a list of layout test names for layout tests that have been modified."""
        diff_output = self.host.executive.run_command(
            ['git', 'diff', 'origin/master', '--name-only', '--diff-filter=AMR'])  # Added, modified, and renamed files.
        paths_from_chromium_root = diff_output.splitlines()
        modified_tests = []
        for path in paths_from_chromium_root:
            absolute_path = self.host.filesystem.join(self.finder.chromium_base(), path)
            if not self.host.filesystem.exists(absolute_path):
                _log.warning('File does not exist: %s', absolute_path)
                continue
            test_path = self.finder.layout_test_name(path)
            if test_path:
                modified_tests.append(test_path)
        return modified_tests

    def get_tests_to_rebaseline(self, modified_tests, test_results):
        """Returns a list of tests to download new baselines for.

        Creates a list of tests to rebaseline depending on the tests' platform-
        specific results. In general, this will be non-ref tests that failed
        due to a baseline mismatch (rather than crash or timeout).

        Args:
            modified_tests: A list of paths to modified files (which should
                be added, removed or modified files in the imported w3c
                directory), relative to the LayoutTests directory.
            test_results: A dictionary of failing tests results.

        Returns:
            A pair: A set of tests to be rebaselined, and a modified copy of
            the test results dictionary. The tests to be rebaselined should include
            testharness.js tests that failed due to a baseline mismatch.
        """
        # Deep-copy so the caller's dictionary is never mutated.
        test_results = copy.deepcopy(test_results)
        tests_to_rebaseline = set()
        for test_path in modified_tests:
            if not (self.is_js_test(test_path) and test_results.get(test_path)):
                continue
            # .keys() snapshots the keys (a list on Python 2), which makes
            # deleting entries while iterating safe here.
            for platform in test_results[test_path].keys():
                if test_results[test_path][platform]['actual'] not in ['CRASH', 'TIMEOUT']:
                    del test_results[test_path][platform]
                    tests_to_rebaseline.add(test_path)
        return sorted(tests_to_rebaseline), test_results

    def is_js_test(self, test_path):
        """Checks whether a given file is a testharness.js test.

        Args:
            test_path: A file path relative to the layout tests directory.
                This might correspond to a deleted file or a non-test.

        Returns:
            True only if the file exists, parses, and is a testharness.js test.
        """
        absolute_path = self.host.filesystem.join(self.finder.layout_tests_dir(), test_path)
        test_parser = TestParser(absolute_path, self.host)
        if not test_parser.test_doc:
            return False
        return test_parser.is_jstest()
# Exemple #16 (scraper artifact: separator between pasted code samples)
class TestImporter(object):
    """Imports W3C tests from a local repository checkout into LayoutTests.

    Two-phase operation: find_importable_tests() builds self.import_list by
    walking the source repository, then import_tests() converts and copies
    the listed files into the destination directory.
    """

    def __init__(self, host, source_repo_path, options):
        self.host = host
        self.source_repo_path = source_repo_path
        self.options = options

        self.filesystem = self.host.filesystem
        self.webkit_finder = WebKitFinder(self.filesystem)
        self._webkit_root = self.webkit_finder.webkit_base()
        self.layout_tests_dir = self.webkit_finder.path_from_webkit_base("LayoutTests")
        # Destination is LayoutTests/<options.destination>/<repo basename>.
        self.destination_directory = self.filesystem.normpath(
            self.filesystem.join(
                self.layout_tests_dir, options.destination, self.filesystem.basename(self.source_repo_path)
            )
        )
        # When the source already is the destination, skipped files are
        # pruned in place rather than simply not copied.
        self.import_in_place = self.source_repo_path == self.destination_directory
        self.dir_above_repo = self.filesystem.dirname(self.source_repo_path)

        # Populated by find_importable_tests(); consumed by import_tests().
        self.import_list = []

    def do_import(self):
        """Runs the full import: scan the source repo, then copy/convert files."""
        _log.info("Importing %s into %s", self.source_repo_path, self.destination_directory)
        self.find_importable_tests()
        self.import_tests()

    def find_importable_tests(self):
        """Walks through the source directory to find what tests should be imported.

        This function sets self.import_list, which contains information about how many
        tests are being imported, and their source and destination paths.
        """
        paths_to_skip = self.find_paths_to_skip()

        for root, dirs, files in self.filesystem.walk(self.source_repo_path):
            # Directory path relative to the directory above the repo, used
            # only for logging and path_base computation below.
            cur_dir = root.replace(self.dir_above_repo + "/", "") + "/"
            _log.info("  scanning " + cur_dir + "...")
            total_tests = 0
            reftests = 0
            jstests = 0

            # Files in 'tools' are not for browser testing, so we skip them.
            # See: http://testthewebforward.org/docs/test-format-guidelines.html#tools
            DIRS_TO_SKIP = (".git", "test-plan", "tools")

            # We copy all files in 'support', including HTML without metadata.
            # See: http://testthewebforward.org/docs/test-format-guidelines.html#support-files
            DIRS_TO_INCLUDE = ("resources", "support")

            if dirs:
                for d in DIRS_TO_SKIP:
                    if d in dirs:
                        dirs.remove(d)

                # Prune (or just skip) directories listed in W3CImportExpectations.
                # Removing entries from dirs stops filesystem.walk from descending.
                for path in paths_to_skip:
                    path_base = path.replace(self.options.destination + "/", "")
                    path_base = path_base.replace(cur_dir, "")
                    path_full = self.filesystem.join(root, path_base)
                    if path_base in dirs:
                        dirs.remove(path_base)
                        if not self.options.dry_run and self.import_in_place:
                            _log.info("  pruning %s", path_base)
                            self.filesystem.rmtree(path_full)
                        else:
                            _log.info("  skipping %s", path_base)

            copy_list = []

            for filename in files:
                path_full = self.filesystem.join(root, filename)
                # path_base: path relative to the LayoutTests directory, used
                # to match against the skip list.
                path_base = path_full.replace(self.source_repo_path + "/", "")
                path_base = self.destination_directory.replace(self.layout_tests_dir + "/", "") + "/" + path_base
                if path_base in paths_to_skip:
                    if not self.options.dry_run and self.import_in_place:
                        _log.info("  pruning %s", path_base)
                        self.filesystem.remove(path_full)
                        continue
                    else:
                        continue
                # FIXME: This block should really be a separate function, but the early-continues make that difficult.

                if filename.startswith(".") or filename.endswith(".pl"):
                    # The w3cs repos may contain perl scripts, which we don't care about.
                    continue
                if filename == "OWNERS" or filename == "reftest.list":
                    # These files fail our presubmits.
                    # See http://crbug.com/584660 and http://crbug.com/582838.
                    continue

                fullpath = self.filesystem.join(root, filename)

                mimetype = mimetypes.guess_type(fullpath)
                # Non-(X)HTML/XML files need no test analysis or conversion;
                # copy them verbatim.
                if (
                    "html" not in str(mimetype[0])
                    and "application/xhtml+xml" not in str(mimetype[0])
                    and "application/xml" not in str(mimetype[0])
                ):
                    copy_list.append({"src": fullpath, "dest": filename})
                    continue

                # Everything inside resources/support dirs is copied as-is,
                # even HTML without test metadata.
                if self.filesystem.basename(root) in DIRS_TO_INCLUDE:
                    copy_list.append({"src": fullpath, "dest": filename})
                    continue

                test_parser = TestParser(fullpath, self.host, vars(self.options))
                test_info = test_parser.analyze_test()
                if test_info is None:
                    # Not recognized as a test; skip it.
                    continue

                if self.path_too_long(path_full):
                    _log.warning(
                        "%s skipped due to long path. "
                        "Max length from repo base %d chars; see http://crbug.com/609871.",
                        path_full,
                        MAX_PATH_LENGTH,
                    )
                    continue

                if "reference" in test_info.keys():
                    test_basename = self.filesystem.basename(test_info["test"])
                    # Add the ref file, following WebKit style.
                    # FIXME: Ideally we'd support reading the metadata
                    # directly rather than relying  on a naming convention.
                    # Using a naming convention creates duplicate copies of the
                    # reference files.
                    ref_file = self.filesystem.splitext(test_basename)[0] + "-expected"
                    # Make sure to use the extension from the *reference*, not
                    # from the test, because at least flexbox tests use XHTML
                    # references but HTML tests.
                    ref_file += self.filesystem.splitext(test_info["reference"])[1]

                    if self.path_too_long(path_full.replace(filename, ref_file)):
                        _log.warning(
                            "%s skipped because path of ref file %s would be too long. "
                            "Max length from repo base %d chars; see http://crbug.com/609871.",
                            path_full,
                            ref_file,
                            MAX_PATH_LENGTH,
                        )
                        continue

                    reftests += 1
                    total_tests += 1
                    copy_list.append(
                        {
                            "src": test_info["reference"],
                            "dest": ref_file,
                            "reference_support_info": test_info["reference_support_info"],
                        }
                    )
                    copy_list.append({"src": test_info["test"], "dest": filename})

                elif "jstest" in test_info.keys():
                    jstests += 1
                    total_tests += 1
                    copy_list.append({"src": fullpath, "dest": filename, "is_jstest": True})
                else:
                    total_tests += 1
                    copy_list.append({"src": fullpath, "dest": filename})

            if copy_list:
                # Only add this directory to the list if there's something to import
                self.import_list.append(
                    {
                        "dirname": root,
                        "copy_list": copy_list,
                        "reftests": reftests,
                        "jstests": jstests,
                        "total_tests": total_tests,
                    }
                )

    def find_paths_to_skip(self):
        """Returns the set of paths marked SKIP in W3CImportExpectations.

        Lines with specifiers are ignored (with a warning), since the import
        skip list is expected to be platform-independent.
        """
        if self.options.ignore_expectations:
            return set()

        paths_to_skip = set()
        port = self.host.port_factory.get()
        w3c_import_expectations_path = self.webkit_finder.path_from_webkit_base("LayoutTests", "W3CImportExpectations")
        w3c_import_expectations = self.filesystem.read_text_file(w3c_import_expectations_path)
        parser = TestExpectationParser(port, all_tests=(), is_lint_mode=False)
        expectation_lines = parser.parse(w3c_import_expectations_path, w3c_import_expectations)
        for line in expectation_lines:
            if "SKIP" in line.expectations:
                if line.specifiers:
                    _log.warning("W3CImportExpectations:%s should not have any specifiers", line.line_numbers)
                    continue
                paths_to_skip.add(line.name)
        return paths_to_skip

    def import_tests(self):
        """Reads |self.import_list|, and converts and copies files to their destination."""
        total_imported_tests = 0
        total_imported_reftests = 0
        total_imported_jstests = 0
        # Maps prefixed CSS property name -> occurrence count, for the
        # summary printed at the end.
        total_prefixed_properties = {}

        for dir_to_copy in self.import_list:
            total_imported_tests += dir_to_copy["total_tests"]
            total_imported_reftests += dir_to_copy["reftests"]
            total_imported_jstests += dir_to_copy["jstests"]

            prefixed_properties = []

            if not dir_to_copy["copy_list"]:
                continue

            orig_path = dir_to_copy["dirname"]

            subpath = self.filesystem.relpath(orig_path, self.source_repo_path)
            new_path = self.filesystem.join(self.destination_directory, subpath)

            if not self.filesystem.exists(new_path):
                self.filesystem.maybe_make_directory(new_path)

            copied_files = []

            for file_to_copy in dir_to_copy["copy_list"]:
                # FIXME: Split this block into a separate function.
                orig_filepath = self.filesystem.normpath(file_to_copy["src"])

                if self.filesystem.isdir(orig_filepath):
                    # FIXME: Figure out what is triggering this and what to do about it.
                    _log.error("%s refers to a directory", orig_filepath)
                    continue

                if not self.filesystem.exists(orig_filepath):
                    _log.warning("%s not found. Possible error in the test.", orig_filepath)
                    continue

                new_filepath = self.filesystem.join(new_path, file_to_copy["dest"])
                if "reference_support_info" in file_to_copy.keys() and file_to_copy["reference_support_info"] != {}:
                    reference_support_info = file_to_copy["reference_support_info"]
                else:
                    reference_support_info = None

                if not self.filesystem.exists(self.filesystem.dirname(new_filepath)):
                    if not self.import_in_place and not self.options.dry_run:
                        self.filesystem.maybe_make_directory(self.filesystem.dirname(new_filepath))

                relpath = self.filesystem.relpath(new_filepath, self.layout_tests_dir)
                if not self.options.overwrite and self.filesystem.exists(new_filepath):
                    _log.info("  skipping %s", relpath)
                else:
                    # FIXME: Maybe doing a file diff is in order here for existing files?
                    # In other words, there's no sense in overwriting identical files, but
                    # there's no harm in copying the identical thing.
                    _log.info("  %s", relpath)

                # Only HTML, XML, or CSS should be converted.
                # FIXME: Eventually, so should JS when support is added for this type of conversion.
                mimetype = mimetypes.guess_type(orig_filepath)
                if "is_jstest" not in file_to_copy and (
                    "html" in str(mimetype[0]) or "xml" in str(mimetype[0]) or "css" in str(mimetype[0])
                ):
                    converted_file = convert_for_webkit(
                        new_path, filename=orig_filepath, reference_support_info=reference_support_info
                    )

                    if not converted_file:
                        if not self.import_in_place and not self.options.dry_run:
                            self.filesystem.copyfile(orig_filepath, new_filepath)  # The file was unmodified.
                    else:
                        # converted_file is (prefixed_properties, new_text).
                        for prefixed_property in converted_file[0]:
                            total_prefixed_properties.setdefault(prefixed_property, 0)
                            total_prefixed_properties[prefixed_property] += 1

                        prefixed_properties.extend(set(converted_file[0]) - set(prefixed_properties))
                        if not self.options.dry_run:
                            outfile = open(new_filepath, "wb")
                            outfile.write(converted_file[1].encode("utf-8"))
                            outfile.close()
                else:
                    if not self.import_in_place and not self.options.dry_run:
                        self.filesystem.copyfile(orig_filepath, new_filepath)
                        # NOTE(review): read_binary_file returns bytes;
                        # comparing to the str "#!" only matches on Python 2.
                        # Confirm if this code ever runs under Python 3.
                        if self.filesystem.read_binary_file(orig_filepath)[:2] == "#!":
                            self.filesystem.make_executable(new_filepath)

                copied_files.append(new_filepath.replace(self._webkit_root, ""))

        _log.info("")
        _log.info("Import complete")
        _log.info("")
        _log.info("IMPORTED %d TOTAL TESTS", total_imported_tests)
        _log.info("Imported %d reftests", total_imported_reftests)
        _log.info("Imported %d JS tests", total_imported_jstests)
        _log.info(
            "Imported %d pixel/manual tests", total_imported_tests - total_imported_jstests - total_imported_reftests
        )
        _log.info("")

        if total_prefixed_properties:
            _log.info("Properties needing prefixes (by count):")
            for prefixed_property in sorted(total_prefixed_properties, key=lambda p: total_prefixed_properties[p]):
                _log.info("  %s: %s", prefixed_property, total_prefixed_properties[prefixed_property])

    def path_too_long(self, source_path):
        """Checks whether a source path is too long to import.

        Args:
            Absolute path of file to be imported.

        Returns:
            True if the path is too long to import, False if it's OK.
        """
        path_from_repo_base = os.path.relpath(source_path, self.source_repo_path)
        return len(path_from_repo_base) > MAX_PATH_LENGTH
class DepsUpdater(object):
    """Updates the Chromium import of the W3C test repositories.

    Clones the upstream repository (web-platform-tests or csswg-test),
    re-imports its tests into LayoutTests/external, commits the result,
    and can optionally upload a CL and send it through the commit queue.
    """

    def __init__(self, host):
        """Initializes the updater with services taken from |host|."""
        self.host = host
        self.executive = host.executive
        self.fs = host.filesystem
        self.finder = WebKitFinder(self.fs)
        self.verbose = False
        # Set in main() once command-line options are known.
        self.git_cl = None

    def main(self, argv=None):
        """Runs the whole update; returns a process exit code (0 = success)."""
        options = self.parse_args(argv)
        self.verbose = options.verbose
        log_level = logging.DEBUG if self.verbose else logging.INFO
        logging.basicConfig(level=log_level, format='%(message)s')

        if not self.checkout_is_okay(options.allow_local_commits):
            return 1

        self.git_cl = GitCL(
            self.host, auth_refresh_token_json=options.auth_refresh_token_json)

        _log.info('Noting the current Chromium commit.')
        _, show_ref_output = self.run(['git', 'show-ref', 'HEAD'])
        chromium_commit = show_ref_output.split()[0]

        if options.target == 'wpt':
            import_commit = self.update(WPT_DEST_NAME, WPT_REPO_URL,
                                        options.keep_w3c_repos_around,
                                        options.revision)
            self._copy_resources()
        elif options.target == 'css':
            import_commit = self.update(CSS_DEST_NAME, CSS_REPO_URL,
                                        options.keep_w3c_repos_around,
                                        options.revision)
        else:
            raise AssertionError("Unsupported target %s" % options.target)

        has_changes = self._has_changes()
        if not has_changes:
            _log.info('Done: no changes to import.')
            return 0

        commit_message = self._commit_message(chromium_commit, import_commit)
        self._commit_changes(commit_message)
        _log.info('Done: changes imported and committed.')

        if options.auto_update:
            commit_successful = self.do_auto_update()
            if not commit_successful:
                return 1
        return 0

    def parse_args(self, argv):
        """Parses and returns command-line arguments for the updater."""
        parser = argparse.ArgumentParser()
        parser.description = __doc__
        parser.add_argument('-v',
                            '--verbose',
                            action='store_true',
                            help='log what we are doing')
        parser.add_argument(
            '--allow-local-commits',
            action='store_true',
            help='allow script to run even if we have local commits')
        parser.add_argument(
            '--keep-w3c-repos-around',
            action='store_true',
            help='leave the w3c repos around that were imported previously.')
        parser.add_argument('-r',
                            dest='revision',
                            action='store',
                            help='Target revision.')
        parser.add_argument(
            'target',
            choices=['css', 'wpt'],
            help=
            'Target repository.  "css" for csswg-test, "wpt" for web-platform-tests.'
        )
        parser.add_argument('--auto-update',
                            action='store_true',
                            help='uploads CL and initiates commit queue.')
        parser.add_argument('--auth-refresh-token-json',
                            help='Rietveld auth refresh JSON token.')
        return parser.parse_args(argv)

    def checkout_is_okay(self, allow_local_commits):
        """Returns True if the local checkout is clean enough to proceed."""
        git_diff_retcode, _ = self.run(['git', 'diff', '--quiet', 'HEAD'],
                                       exit_on_failure=False)
        if git_diff_retcode:
            _log.warning('Checkout is dirty; aborting.')
            return False

        local_commits = self.run(
            ['git', 'log', '--oneline', 'origin/master..HEAD'])[1]
        if local_commits and not allow_local_commits:
            _log.warning(
                'Checkout has local commits; aborting. Use --allow-local-commits to allow this.'
            )
            return False

        if self.fs.exists(self.path_from_webkit_base(WPT_DEST_NAME)):
            _log.warning('WebKit/%s exists; aborting.', WPT_DEST_NAME)
            return False

        if self.fs.exists(self.path_from_webkit_base(CSS_DEST_NAME)):
            _log.warning('WebKit/%s repo exists; aborting.', CSS_DEST_NAME)
            return False

        return True

    def _copy_resources(self):
        """Copies resources from LayoutTests/resources to wpt and vice versa.

        There are resources from our repository that we use instead of the
        upstream versions. Conversely, there are also some resources that
        are copied in the other direction.

        Specifically:
          - testharnessreport.js contains code needed to integrate our testing
            with testharness.js; we also want our code to be used for tests
            in wpt.
          - TODO(qyearsley, jsbell): Document why other other files are copied,
            or stop copying them if it's unnecessary.

        If this method is changed, the lists of files expected to be identical
        in LayoutTests/PRESUBMIT.py should also be changed.
        """
        # TODO(tkent): resources_to_copy_to_wpt is unnecessary after enabling
        # WPTServe.
        resources_to_copy_to_wpt = [
            ('testharnessreport.js', 'resources'),
            ('WebIDLParser.js', 'resources'),
            ('vendor-prefix.js', 'common'),
        ]
        resources_to_copy_from_wpt = [
            ('idlharness.js', 'resources'),
            ('testharness.js', 'resources'),
        ]
        for filename, wpt_subdir in resources_to_copy_to_wpt:
            source = self.path_from_webkit_base('LayoutTests', 'resources',
                                                filename)
            destination = self.path_from_webkit_base('LayoutTests', 'external',
                                                     WPT_DEST_NAME, wpt_subdir,
                                                     filename)
            self.copyfile(source, destination)
            self.run(['git', 'add', destination])
        for filename, wpt_subdir in resources_to_copy_from_wpt:
            source = self.path_from_webkit_base('LayoutTests', 'external',
                                                WPT_DEST_NAME, wpt_subdir,
                                                filename)
            destination = self.path_from_webkit_base('LayoutTests',
                                                     'resources', filename)
            self.copyfile(source, destination)
            self.run(['git', 'add', destination])

    def _generate_manifest(self, dest_path):
        """Generates MANIFEST.json for imported tests.

        Args:
            dest_path: Path to the destination WPT directory.

        Runs the (newly-updated) manifest command if it's found, and then
        stages the generated MANIFEST.json in the git index, ready to commit.
        """
        manifest_command = self.finder.path_from_webkit_base(
            'Tools', 'Scripts', 'webkitpy', 'thirdparty', 'wpt', 'wpt',
            'manifest')
        if 'css' in dest_path:
            # Do nothing for csswg-test.
            return
        _log.info('Generating MANIFEST.json')
        self.run([manifest_command, '--work', '--tests-root', dest_path])
        self.run(['git', 'add', self.fs.join(dest_path, 'MANIFEST.json')])

    def update(self, dest_dir_name, url, keep_w3c_repos_around, revision):
        """Updates an imported repository.

        Args:
            dest_dir_name: The destination directory name.
            url: URL of the git repository.
            keep_w3c_repos_around: If True, the temporary clone is left on
                disk after the import instead of being deleted.
            revision: Commit hash or None.

        Returns:
            A string for the commit description "<destination>@<commitish>".
        """
        temp_repo_path = self.path_from_webkit_base(dest_dir_name)
        _log.info('Cloning %s into %s.', url, temp_repo_path)
        self.run(['git', 'clone', url, temp_repo_path])

        if revision is not None:
            _log.info('Checking out %s', revision)
            self.run(['git', 'checkout', revision], cwd=temp_repo_path)

        self.run(['git', 'submodule', 'update', '--init', '--recursive'],
                 cwd=temp_repo_path)

        _log.info('Noting the revision we are importing.')
        _, show_ref_output = self.run(['git', 'show-ref', 'origin/master'],
                                      cwd=temp_repo_path)
        master_commitish = show_ref_output.split()[0]

        _log.info('Cleaning out tests from LayoutTests/external/%s.',
                  dest_dir_name)
        dest_path = self.path_from_webkit_base('LayoutTests', 'external',
                                               dest_dir_name)
        # Baselines are intentionally kept here; orphans are pruned below.
        is_not_baseline_filter = lambda fs, dirname, basename: not self.is_baseline(
            basename)
        files_to_delete = self.fs.files_under(
            dest_path, file_filter=is_not_baseline_filter)
        for subpath in files_to_delete:
            self.remove('LayoutTests', 'external', subpath)

        _log.info('Importing the tests.')
        test_importer = TestImporter(self.host, temp_repo_path)
        test_importer.do_import()

        self.run(
            ['git', 'add', '--all',
             'LayoutTests/external/%s' % dest_dir_name])

        _log.info('Deleting any orphaned baselines.')

        is_baseline_filter = lambda fs, dirname, basename: self.is_baseline(
            basename)
        previous_baselines = self.fs.files_under(
            dest_path, file_filter=is_baseline_filter)

        for subpath in previous_baselines:
            full_path = self.fs.join(dest_path, subpath)
            # If globbing for sibling files of the baseline only finds the
            # baseline itself, the test it belonged to is gone.
            if self.fs.glob(full_path.replace('-expected.txt',
                                              '*')) == [full_path]:
                self.fs.remove(full_path)

        self._generate_manifest(dest_path)

        if not keep_w3c_repos_around:
            _log.info('Deleting temp repo directory %s.', temp_repo_path)
            self.rmtree(temp_repo_path)

        _log.info(
            'Updating TestExpectations for any removed or renamed tests.')
        self.update_all_test_expectations_files(self._list_deleted_tests(),
                                                self._list_renamed_tests())

        return '%s@%s' % (dest_dir_name, master_commitish)

    def _commit_changes(self, commit_message):
        """Commits all staged and tracked changes with |commit_message|."""
        _log.info('Committing changes.')
        self.run(['git', 'commit', '--all', '-F', '-'], stdin=commit_message)

    def _has_changes(self):
        """Returns True if the working tree differs from HEAD."""
        # `git diff --quiet` exits with 1 when there are differences.
        return_code, _ = self.run(['git', 'diff', '--quiet', 'HEAD'],
                                  exit_on_failure=False)
        return return_code == 1

    def _commit_message(self, chromium_commit, import_commit):
        """Returns the commit message for an import commit."""
        return ('Import %s\n\nUsing update-w3c-deps in Chromium %s.\n\n' %
                (import_commit, chromium_commit))

    @staticmethod
    def is_baseline(basename):
        """Returns True for text baseline filenames ("*-expected.txt")."""
        return basename.endswith('-expected.txt')

    def run(self, cmd, exit_on_failure=True, cwd=None, stdin=''):
        """Runs |cmd|, returning (return_code, stdout_output).

        Args:
            cmd: Command argument list.
            exit_on_failure: If True, exits the process on nonzero status.
            cwd: Working directory; defaults to the WebKit base directory.
            stdin: String piped to the command's standard input.
        """
        _log.debug('Running command: %s', ' '.join(cmd))

        cwd = cwd or self.finder.webkit_base()
        proc = self.executive.popen(cmd,
                                    stdout=self.executive.PIPE,
                                    stderr=self.executive.PIPE,
                                    stdin=self.executive.PIPE,
                                    cwd=cwd)
        out, err = proc.communicate(stdin)
        if proc.returncode or self.verbose:
            _log.info('# ret> %d', proc.returncode)
            if out:
                for line in out.splitlines():
                    _log.info('# out> %s', line)
            if err:
                for line in err.splitlines():
                    _log.info('# err> %s', line)
        if exit_on_failure and proc.returncode:
            self.host.exit(proc.returncode)
        return proc.returncode, out

    def check_run(self, command):
        """Runs |command| and returns its output.

        Raises:
            Exception: If the command exits with a nonzero status.
        """
        return_code, out = self.run(command)
        if return_code:
            # The format arguments must be a tuple; formatting with a bare
            # string here used to raise a TypeError ("not enough arguments
            # for format string") instead of the intended error message.
            raise Exception('%s failed with exit code %d.' %
                            (' '.join(command), return_code))
        return out

    def copyfile(self, source, destination):
        """Copies a single file, logging the operation."""
        _log.debug('cp %s %s', source, destination)
        self.fs.copyfile(source, destination)

    def remove(self, *comps):
        """Removes the file at the given WebKit-base-relative path."""
        dest = self.path_from_webkit_base(*comps)
        _log.debug('rm %s', dest)
        self.fs.remove(dest)

    def rmtree(self, *comps):
        """Recursively removes the given WebKit-base-relative directory."""
        dest = self.path_from_webkit_base(*comps)
        _log.debug('rm -fr %s', dest)
        self.fs.rmtree(dest)

    def path_from_webkit_base(self, *comps):
        """Returns an absolute path built from the WebKit base directory."""
        return self.finder.path_from_webkit_base(*comps)

    def do_auto_update(self):
        """Attempts to upload a CL, make any required adjustments, and commit.

        This function assumes that the imported repo has already been updated,
        and that change has been committed. There may be newly-failing tests,
        so before being able to commit these new changes, we may need to update
        TestExpectations or download new baselines.

        Returns:
            True if successfully committed, False otherwise.
        """
        self._upload_cl()
        _log.info('Issue: %s', self.git_cl.run(['issue']).strip())

        # First, try on Blink try bots in order to get any new baselines.
        _log.info('Triggering try jobs.')
        for try_bot in self.host.builders.all_try_builder_names():
            self.git_cl.run(['try', '-b', try_bot])
        try_results = self.git_cl.wait_for_try_jobs(
            poll_delay_seconds=POLL_DELAY_SECONDS,
            timeout_seconds=TIMEOUT_SECONDS)

        if not try_results:
            self.git_cl.run(['set-close'])
            return False

        if try_results and self.git_cl.has_failing_try_results(try_results):
            self.fetch_new_expectations_and_baselines()

        # Wait for CQ try jobs to finish. If there are failures, then abort.
        self.git_cl.run(['set-commit', '--rietveld'])
        try_results = self.git_cl.wait_for_try_jobs(
            poll_delay_seconds=POLL_DELAY_SECONDS,
            timeout_seconds=TIMEOUT_SECONDS)

        if not try_results:
            self.git_cl.run(['set-close'])
            return False

        if self.git_cl.has_failing_try_results(try_results):
            _log.info('CQ failed; aborting.')
            self.git_cl.run(['set-close'])
            return False

        _log.info('Update completed.')
        return True

    def _upload_cl(self):
        """Uploads the current change list, CCing directory owners."""
        _log.info('Uploading change list.')
        cc_list = self.get_directory_owners_to_cc()
        description = self._cl_description()
        self.git_cl.run([
            'upload',
            '-f',
            '--rietveld',
            '-m',
            description,
        ] + ['--cc=' + email for email in cc_list])

    def _cl_description(self):
        """Returns the CL description based on the latest commit message."""
        description = self.check_run(['git', 'log', '-1', '--format=%B'])
        build_link = self._build_link()
        if build_link:
            description += 'Build: %s\n\n' % build_link
        description += '[email protected]\n'
        description += 'NOEXPORT=true'
        return description

    def _build_link(self):
        """Returns a link to a job, if running on buildbot."""
        master_name = self.host.environ.get('BUILDBOT_MASTERNAME')
        builder_name = self.host.environ.get('BUILDBOT_BUILDERNAME')
        build_number = self.host.environ.get('BUILDBOT_BUILDNUMBER')
        if not (master_name and builder_name and build_number):
            return None
        return 'https://build.chromium.org/p/%s/builders/%s/builds/%s' % (
            master_name, builder_name, build_number)

    def get_directory_owners_to_cc(self):
        """Returns a list of email addresses to CC for the current import."""
        _log.info('Gathering directory owners emails to CC.')
        directory_owners_file_path = self.finder.path_from_webkit_base(
            'Tools', 'Scripts', 'webkitpy', 'w3c', 'directory_owners.json')
        with open(directory_owners_file_path) as data_file:
            directory_to_owner = self.parse_directory_owners(
                json.load(data_file))
        out = self.check_run(['git', 'diff', 'origin/master', '--name-only'])
        changed_files = out.splitlines()
        return self.generate_email_list(changed_files, directory_to_owner)

    @staticmethod
    def parse_directory_owners(decoded_data_file):
        """Returns a directory -> notification-email dict, skipping entries
        with an empty email."""
        directory_dict = {}
        for dict_set in decoded_data_file:
            if dict_set['notification-email']:
                directory_dict[
                    dict_set['directory']] = dict_set['notification-email']
        return directory_dict

    def generate_email_list(self, changed_files, directory_to_owner):
        """Returns a list of email addresses based on the given file list and
        directory-to-owner mapping.

        Args:
            changed_files: A list of file paths relative to the repository root.
            directory_to_owner: A dict mapping layout test directories to emails.

        Returns:
            A list of the email addresses to be notified for the current import.
        """
        email_addresses = set()
        for file_path in changed_files:
            test_path = self.finder.layout_test_name(file_path)
            if test_path is None:
                continue
            test_dir = self.fs.dirname(test_path)
            if test_dir in directory_to_owner:
                email_addresses.add(directory_to_owner[test_dir])
        return sorted(email_addresses)

    def fetch_new_expectations_and_baselines(self):
        """Adds new expectations and downloads baselines based on try job results, then commits and uploads the change."""
        _log.info(
            'Adding test expectations lines to LayoutTests/TestExpectations.')
        script_path = self.path_from_webkit_base(
            'Tools', 'Scripts', 'update-w3c-test-expectations')
        self.run([self.host.executable, script_path, '--verbose'])
        message = 'Modify TestExpectations or download new baselines for tests.'
        self.check_run(['git', 'commit', '-a', '-m', message])
        self.git_cl.run(['upload', '-m', message, '--rietveld'])

    def update_all_test_expectations_files(self, deleted_tests, renamed_tests):
        """Updates all test expectations files for tests that have been deleted or renamed."""
        port = self.host.port_factory.get()
        for path, file_contents in port.all_expectations_dict().iteritems():

            parser = TestExpectationParser(port,
                                           all_tests=None,
                                           is_lint_mode=False)
            expectation_lines = parser.parse(path, file_contents)
            self._update_single_test_expectations_file(path, expectation_lines,
                                                       deleted_tests,
                                                       renamed_tests)

    def _update_single_test_expectations_file(self, path, expectation_lines,
                                              deleted_tests, renamed_tests):
        """Updates single test expectations file."""
        # FIXME: This won't work for removed or renamed directories with test expectations
        # that are directories rather than individual tests.
        new_lines = []
        changed_lines = []
        for expectation_line in expectation_lines:
            if expectation_line.name in deleted_tests:
                continue
            if expectation_line.name in renamed_tests:
                expectation_line.name = renamed_tests[expectation_line.name]
                # Upon parsing the file, a "path does not exist" warning is expected
                # to be there for tests that have been renamed, and if there are warnings,
                # then the original string is used. If the warnings are reset, then the
                # expectation line is re-serialized when output.
                expectation_line.warnings = []
                changed_lines.append(expectation_line)
            new_lines.append(expectation_line)
        new_file_contents = TestExpectations.list_to_string(
            new_lines, reconstitute_only_these=changed_lines)
        self.host.filesystem.write_text_file(path, new_file_contents)

    def _list_deleted_tests(self):
        """Returns a list of layout tests that have been deleted."""
        out = self.check_run([
            'git', 'diff', 'origin/master', '-M100%', '--diff-filter=D',
            '--name-only'
        ])
        deleted_tests = []
        for line in out.splitlines():
            test = self.finder.layout_test_name(line)
            if test:
                deleted_tests.append(test)
        return deleted_tests

    def _list_renamed_tests(self):
        """Returns a dict mapping source to dest name for layout tests that have been renamed."""
        out = self.check_run([
            'git', 'diff', 'origin/master', '-M100%', '--diff-filter=R',
            '--name-status'
        ])
        renamed_tests = {}
        for line in out.splitlines():
            _, source_path, dest_path = line.split()
            source_test = self.finder.layout_test_name(source_path)
            dest_test = self.finder.layout_test_name(dest_path)
            if source_test and dest_test:
                renamed_tests[source_test] = dest_test
        return renamed_tests
# Example #18 (scraped example separator converted to a comment)
class TestImporter(object):
    def __init__(self, host):
        """Initializes the importer with services taken from |host|."""
        self.host = host
        self.executive = host.executive
        self.fs = host.filesystem
        # WebKitFinder resolves paths relative to the WebKit base directory.
        self.finder = WebKitFinder(self.fs)
        self.verbose = False  # Set from --verbose in main().
        self.git_cl = None  # Created in main() once options are parsed.

    def main(self, argv=None):
        """Runs the full import; returns a process exit code (0 = success)."""
        options = self.parse_args(argv)
        self.verbose = options.verbose
        log_level = logging.DEBUG if self.verbose else logging.INFO
        logging.basicConfig(level=log_level, format='%(message)s')

        if not self.checkout_is_okay(options.allow_local_commits):
            return 1

        self.git_cl = GitCL(
            self.host, auth_refresh_token_json=options.auth_refresh_token_json)

        # Record the current Chromium commit so it can be named in the
        # generated commit message.
        _log.debug('Noting the current Chromium commit.')
        _, show_ref_output = self.run(['git', 'show-ref', 'HEAD'])
        chromium_commit = show_ref_output.split()[0]

        assert options.target in ('wpt', 'css')
        dest_dir_name = WPT_DEST_NAME
        repo_url = WPT_REPO_URL
        if options.target != 'wpt':
            dest_dir_name = CSS_DEST_NAME
            repo_url = CSS_REPO_URL

        # TODO(qyearsley): Simplify this to use LocalWPT.fetch when csswg-test
        # is merged into web-platform-tests.
        temp_repo_path = self.path_from_webkit_base(dest_dir_name)
        _log.info('Cloning repo: %s', repo_url)
        _log.info('Local path: %s', temp_repo_path)
        self.run(['git', 'clone', repo_url, temp_repo_path])

        if options.target == 'wpt' and not options.ignore_exportable_commits:
            commits = self.exportable_but_not_exported_commits(temp_repo_path)
            if commits:
                # If there are exportable commits, then there's no more work
                # to do for now. This isn't really an error case; we expect
                # to hit this case some of the time.
                _log.info(
                    'There were exportable but not-yet-exported commits:')
                for commit in commits:
                    _log.info(
                        '  https://chromium.googlesource.com/chromium/src/+/%s',
                        commit.sha)
                _log.info(
                    'Aborting import to prevent clobbering these commits.')
                self.clean_up_temp_repo(temp_repo_path)
                return 0

        import_commit = self.update(dest_dir_name, temp_repo_path,
                                    options.revision)

        self.clean_up_temp_repo(temp_repo_path)

        if options.target == 'wpt':
            self._copy_resources()

        has_changes = self._has_changes()
        if not has_changes:
            _log.info('Done: no changes to import.')
            return 0

        commit_message = self._commit_message(chromium_commit, import_commit)
        self._commit_changes(commit_message)
        _log.info('Done: changes imported and committed.')

        # Optionally upload the CL and send it through the commit queue.
        if options.auto_update:
            commit_successful = self.do_auto_update()
            if not commit_successful:
                return 1
        return 0

    def parse_args(self, argv):
        parser = argparse.ArgumentParser()
        parser.description = __doc__
        parser.add_argument('-v',
                            '--verbose',
                            action='store_true',
                            help='log what we are doing')
        parser.add_argument(
            '--allow-local-commits',
            action='store_true',
            help='allow script to run even if we have local commits')
        parser.add_argument('-r',
                            dest='revision',
                            action='store',
                            help='Target revision.')
        parser.add_argument(
            'target',
            choices=['css', 'wpt'],
            help=
            'Target repository.  "css" for csswg-test, "wpt" for web-platform-tests.'
        )
        parser.add_argument('--auto-update',
                            action='store_true',
                            help='uploads CL and initiates commit queue.')
        parser.add_argument('--auth-refresh-token-json',
                            help='authentication refresh token JSON file, '
                            'used for authentication for try jobs, '
                            'generally not necessary on developer machines')
        parser.add_argument(
            '--ignore-exportable-commits',
            action='store_true',
            help=
            'Continue even if there are exportable commits that may be overwritten.'
        )
        return parser.parse_args(argv)

    def checkout_is_okay(self, allow_local_commits):
        """Returns True if the local checkout is clean enough to proceed."""
        diff_code, _ = self.run(['git', 'diff', '--quiet', 'HEAD'],
                                exit_on_failure=False)
        if diff_code:
            _log.warning('Checkout is dirty; aborting.')
            return False

        _, local_commits = self.run(
            ['git', 'log', '--oneline', 'origin/master..HEAD'])
        if local_commits and not allow_local_commits:
            _log.warning(
                'Checkout has local commits; aborting. Use --allow-local-commits to allow this.'
            )
            return False

        # A leftover clone from a previous run would be re-imported as tests.
        if self.fs.exists(self.path_from_webkit_base(WPT_DEST_NAME)):
            _log.warning('WebKit/%s exists; aborting.', WPT_DEST_NAME)
            return False
        if self.fs.exists(self.path_from_webkit_base(CSS_DEST_NAME)):
            _log.warning('WebKit/%s repo exists; aborting.', CSS_DEST_NAME)
            return False

        return True

    def exportable_but_not_exported_commits(self, wpt_path):
        """Checks for commits that might be overwritten by importing.

        Args:
            wpt_path: The path to a local checkout of web-platform-tests.

        Returns:
            A list of commits in the Chromium repo that are exportable
            but not yet exported to the web-platform-tests repo.
        """
        local_wpt = LocalWPT(self.host, path=wpt_path)
        assert self.host.filesystem.exists(wpt_path)
        # Presumably the most recent Chromium commit already present in the
        # WPT checkout; exportable commits made after it have not yet been
        # exported. TODO(review): confirm against LocalWPT's contract.
        _, chromium_commit = local_wpt.most_recent_chromium_commit()
        return exportable_commits_since(chromium_commit.sha, self.host,
                                        local_wpt)

    def clean_up_temp_repo(self, temp_repo_path):
        """Deletes the temporary upstream clone created during the import."""
        _log.info('Deleting temp repo directory %s.', temp_repo_path)
        self.rmtree(temp_repo_path)

    def _copy_resources(self):
        """Copies resources from wpt to LayoutTests/resources.

        We copy idlharness.js and testharness.js in wpt to LayoutTests/resources
        in order to use them in non-imported tests.

        If this method is changed, the lists of files expected to be identical
        in LayoutTests/PRESUBMIT.py should also be changed.
        """
        # (filename, wpt subdirectory) pairs mirrored into LayoutTests/resources.
        for filename, wpt_subdir in (('idlharness.js', 'resources'),
                                     ('testharness.js', 'resources')):
            source = self.path_from_webkit_base(
                'LayoutTests', 'external', WPT_DEST_NAME, wpt_subdir, filename)
            destination = self.path_from_webkit_base(
                'LayoutTests', 'resources', filename)
            self.copyfile(source, destination)
            self.run(['git', 'add', destination])

    def _generate_manifest(self, dest_path):
        """Generates MANIFEST.json for imported tests.

        Args:
            dest_path: Path to the destination WPT directory.

        Runs the (newly-updated) manifest command if it's found, and then
        stages the generated MANIFEST.json in the git index, ready to commit.
        """
        if 'css' in dest_path:
            # Do nothing for csswg-test.
            return
        _log.info('Generating MANIFEST.json')
        WPTManifest.generate_manifest(self.host, dest_path)
        generated_path = self.fs.join(dest_path, 'MANIFEST.json')
        assert self.fs.exists(generated_path)
        base_copy_path = self.fs.normpath(
            self.fs.join(dest_path, '..', 'WPT_BASE_MANIFEST.json'))
        self.copyfile(generated_path, base_copy_path)
        self.run(['git', 'add', base_copy_path])

    def update(self, dest_dir_name, temp_repo_path, revision):
        """Updates an imported repository.

        Args:
            dest_dir_name: The destination directory name.
            temp_repo_path: Path to local checkout of W3C test repo.
            revision: A W3C test repo commit hash, or None.

        Returns:
            A string for the commit description "<destination>@<commitish>".
        """
        if revision is not None:
            _log.info('Checking out %s', revision)
            self.run(['git', 'checkout', revision], cwd=temp_repo_path)

        self.run(['git', 'submodule', 'update', '--init', '--recursive'],
                 cwd=temp_repo_path)

        # The commitish is recorded for the returned commit description.
        _log.info('Noting the revision we are importing.')
        _, show_ref_output = self.run(['git', 'show-ref', 'origin/master'],
                                      cwd=temp_repo_path)
        master_commitish = show_ref_output.split()[0]

        _log.info('Cleaning out tests from LayoutTests/external/%s.',
                  dest_dir_name)
        dest_path = self.path_from_webkit_base('LayoutTests', 'external',
                                               dest_dir_name)
        # Baselines are kept here on purpose; orphans are pruned afterwards
        # by _delete_orphaned_baselines().
        is_not_baseline_filter = lambda fs, dirname, basename: not self.is_baseline(
            basename)
        files_to_delete = self.fs.files_under(
            dest_path, file_filter=is_not_baseline_filter)
        for subpath in files_to_delete:
            self.remove('LayoutTests', 'external', subpath)

        _log.info('Importing the tests.')
        test_copier = TestCopier(self.host, temp_repo_path)
        test_copier.do_import()

        self.run(
            ['git', 'add', '--all',
             'LayoutTests/external/%s' % dest_dir_name])

        self._delete_orphaned_baselines(dest_path)

        self._generate_manifest(dest_path)

        _log.info(
            'Updating TestExpectations for any removed or renamed tests.')
        self.update_all_test_expectations_files(self._list_deleted_tests(),
                                                self._list_renamed_tests())

        return '%s@%s' % (dest_dir_name, master_commitish)

    def _commit_changes(self, commit_message):
        """Commits all staged and tracked changes with |commit_message|."""
        _log.info('Committing changes.')
        self.run(['git', 'commit', '--all', '-F', '-'], stdin=commit_message)

    def _has_changes(self):
        """Returns True if the working tree has uncommitted changes."""
        diff_result = self.run(['git', 'diff', '--quiet', 'HEAD'],
                               exit_on_failure=False)
        return diff_result[0] == 1

    def _commit_message(self, chromium_commit, import_commit):
        return ('Import %s\n\n'
                'Using wpt-import in Chromium %s.\n\n'
                'NOEXPORT=true' % (import_commit, chromium_commit))

    def _delete_orphaned_baselines(self, dest_path):
        """Removes -expected.txt files under dest_path whose test is gone."""
        _log.info('Deleting any orphaned baselines.')

        def baseline_filter(fs, dirname, basename):
            # Only visit text baselines; other files are left alone.
            return self.is_baseline(basename)

        for rel_path in self.fs.files_under(dest_path,
                                            file_filter=baseline_filter):
            baseline_path = self.fs.join(dest_path, rel_path)
            if not self._has_corresponding_test(baseline_path):
                self.fs.remove(baseline_path)

    def _has_corresponding_test(self, full_baseline_path):
        """Returns True if some test file exists for the given baseline."""
        base = full_baseline_path.replace('-expected.txt', '')
        for extension in Port.supported_file_extensions:
            if self.fs.exists(base + extension):
                return True
        return False

    @staticmethod
    def is_baseline(basename):
        # TODO(qyearsley): Find a better, centralized place for this.
        # Also, the name for this method should be is_text_baseline.
        return basename.endswith('-expected.txt')

    def run(self, cmd, exit_on_failure=True, cwd=None, stdin=''):
        """Runs a command via the executive, logging its output on failure.

        Args:
            cmd: The command to run, as a list of argument strings.
            exit_on_failure: If True, exits the host process when the
                command returns a non-zero exit code.
            cwd: Working directory; defaults to the WebKit base directory.
            stdin: A string to pipe to the command's standard input.

        Returns:
            A (return_code, stdout) pair.
        """
        _log.debug('Running command: %s', ' '.join(cmd))

        cwd = cwd or self.finder.webkit_base()
        proc = self.executive.popen(cmd,
                                    stdout=self.executive.PIPE,
                                    stderr=self.executive.PIPE,
                                    stdin=self.executive.PIPE,
                                    cwd=cwd)
        out, err = proc.communicate(stdin)
        # Output is only echoed to the log when the command failed or when
        # running verbosely, to keep the default log quiet.
        if proc.returncode or self.verbose:
            _log.info('# ret> %d', proc.returncode)
            if out:
                for line in out.splitlines():
                    _log.info('# out> %s', line)
            if err:
                for line in err.splitlines():
                    _log.info('# err> %s', line)
        if exit_on_failure and proc.returncode:
            self.host.exit(proc.returncode)
        return proc.returncode, out

    def check_run(self, command):
        """Runs a command and returns its stdout, raising on failure.

        Args:
            command: The command to run, as a list of argument strings.

        Returns:
            The command's standard output.

        Raises:
            Exception: If the command exits with a non-zero code.
        """
        return_code, out = self.run(command)
        if return_code:
            # The original applied '%' only to ' '.join(command), which
            # itself raised TypeError ("not enough arguments for format
            # string") instead of the intended message; format both values.
            raise Exception('%s failed with exit code %d.' %
                            (' '.join(command), return_code))
        return out

    def copyfile(self, source, destination):
        """Copies a single file, logging the operation at debug level."""
        _log.debug('cp %s %s', source, destination)
        self.fs.copyfile(source, destination)

    def remove(self, *comps):
        """Deletes the file at the given path relative to the WebKit base."""
        target = self.path_from_webkit_base(*comps)
        _log.debug('rm %s', target)
        self.fs.remove(target)

    def rmtree(self, *comps):
        """Recursively deletes the tree at the given WebKit-relative path."""
        target = self.path_from_webkit_base(*comps)
        _log.debug('rm -fr %s', target)
        self.fs.rmtree(target)

    def path_from_webkit_base(self, *comps):
        """Returns an absolute path built from the WebKit base directory."""
        return self.finder.path_from_webkit_base(*comps)

    def do_auto_update(self):
        """Attempts to upload a CL, make any required adjustments, and commit.

        This function assumes that the imported repo has already been updated,
        and that change has been committed. There may be newly-failing tests,
        so before being able to commit these new changes, we may need to update
        TestExpectations or download new baselines.

        Returns:
            True if successfully committed, False otherwise.
        """
        self._upload_cl()
        _log.info('Issue: %s', self.git_cl.run(['issue']).strip())

        # First, try on Blink try bots in order to get any new baselines.
        # TODO(qyearsley): Make this faster by triggering all try jobs in
        # one invocation.
        _log.info('Triggering try jobs.')
        self.git_cl.trigger_try_jobs()
        try_results = self.git_cl.wait_for_try_jobs(
            poll_delay_seconds=POLL_DELAY_SECONDS,
            timeout_seconds=TIMEOUT_SECONDS)

        if not try_results:
            self.git_cl.run(['set-close'])
            return False

        if try_results and self.git_cl.has_failing_try_results(try_results):
            self.fetch_new_expectations_and_baselines()

        # Trigger CQ and wait for CQ try jobs to finish.
        self.git_cl.run(['set-commit', '--gerrit'])
        try_results = self.git_cl.wait_for_try_jobs(
            poll_delay_seconds=POLL_DELAY_SECONDS,
            timeout_seconds=TIMEOUT_SECONDS)

        if not try_results:
            _log.error('No try job results.')
            self.git_cl.run(['set-close'])
            return False

        # If the CQ passes, then the issue will be closed.
        # Fixed: the original passed ['status' '--field', 'status'], where
        # the missing comma made adjacent literals concatenate to
        # 'status--field', producing a bogus git-cl subcommand.
        status = self.git_cl.run(['status', '--field', 'status']).strip()
        _log.info('CL status: "%s"', status)
        if status not in ('lgtm', 'closed'):
            _log.error('CQ appears to have failed; aborting.')
            self.git_cl.run(['set-close'])
            return False

        _log.info('Update completed.')
        return True

    def _upload_cl(self):
        """Uploads the current change as a Gerrit CL, CCing directory owners."""
        _log.info('Uploading change list.')
        directory_owners = self.get_directory_owners()
        upload_args = [
            'upload',
            '-f',
            '--gerrit',
            '-m',
            self._cl_description(directory_owners),
        ]
        self.git_cl.run(upload_args + self._cc_part(directory_owners))

    @staticmethod
    def _cc_part(directory_owners):
        cc_part = []
        for owner_tuple in sorted(directory_owners):
            cc_part.extend('--cc=' + owner for owner in owner_tuple)
        return cc_part

    def get_directory_owners(self):
        """Returns a mapping of email addresses to owners of changed tests."""
        _log.info('Gathering directory owners emails to CC.')
        changed_files = self.host.git().changed_files()
        owners_extractor = DirectoryOwnersExtractor(self.fs)
        owners_extractor.read_owner_map()
        return owners_extractor.list_owners(changed_files)

    def _cl_description(self, directory_owners):
        """Returns a CL description string.

        Args:
            directory_owners: A dict of tuples of owner names to lists of directories.

        Returns:
            The last commit message, augmented with a build link, the
            directory owners listing, a TBR line, and a trailing
            NOEXPORT=true tag.
        """
        description = self.check_run(['git', 'log', '-1', '--format=%B'])
        build_link = current_build_link(self.host)
        if build_link:
            description += 'Build: %s\n\n' % build_link

        if directory_owners:
            description += self._format_directory_owners(
                directory_owners) + '\n\n'
        description += '[email protected]\n'

        # Move any NOEXPORT tag to the end of the description.
        # Stripping the tag can leave a run of blank lines behind; collapse
        # one four-newline run so the description stays tidy.
        description = description.replace('NOEXPORT=true', '')
        description = description.replace('\n\n\n\n', '\n\n')
        description += 'NOEXPORT=true'
        return description

    @staticmethod
    def _format_directory_owners(directory_owners):
        message_lines = ['Directory owners for changes in this CL:']
        for owner_tuple, directories in sorted(directory_owners.items()):
            message_lines.append(', '.join(owner_tuple) + ':')
            message_lines.extend('  ' + d for d in directories)
        return '\n'.join(message_lines)

    def fetch_new_expectations_and_baselines(self):
        """Adds new expectations and downloads baselines based on try job results, then commits and uploads the change."""
        _log.info(
            'Adding test expectations lines to LayoutTests/TestExpectations.')
        updater = WPTExpectationsUpdater(self.host)
        updater.run(args=[])
        commit_message = 'Update test expectations and baselines.'
        self.check_run(['git', 'commit', '-a', '-m', commit_message])
        self.git_cl.run(['upload', '-t', commit_message, '--gerrit'])

    def update_all_test_expectations_files(self, deleted_tests, renamed_tests):
        """Updates all test expectations files for tests that have been deleted or renamed."""
        port = self.host.port_factory.get()
        expectations_dict = port.all_expectations_dict()
        for path, file_contents in expectations_dict.iteritems():
            parser = TestExpectationParser(port,
                                           all_tests=None,
                                           is_lint_mode=False)
            parsed_lines = parser.parse(path, file_contents)
            self._update_single_test_expectations_file(
                path, parsed_lines, deleted_tests, renamed_tests)

    def _update_single_test_expectations_file(self, path, expectation_lines,
                                              deleted_tests, renamed_tests):
        """Updates single test expectations file.

        Drops lines for deleted tests and rewrites lines for renamed tests,
        then writes the file back in place.

        Args:
            path: Path of the expectations file being rewritten.
            expectation_lines: Parsed expectation lines from that file.
            deleted_tests: A list of test names that no longer exist.
            renamed_tests: A dict mapping old test names to new names.
        """
        # FIXME: This won't work for removed or renamed directories with test expectations
        # that are directories rather than individual tests.
        new_lines = []
        changed_lines = []
        for expectation_line in expectation_lines:
            if expectation_line.name in deleted_tests:
                continue
            if expectation_line.name in renamed_tests:
                expectation_line.name = renamed_tests[expectation_line.name]
                # Upon parsing the file, a "path does not exist" warning is expected
                # to be there for tests that have been renamed, and if there are warnings,
                # then the original string is used. If the warnings are reset, then the
                # expectation line is re-serialized when output.
                expectation_line.warnings = []
                changed_lines.append(expectation_line)
            new_lines.append(expectation_line)
        new_file_contents = TestExpectations.list_to_string(
            new_lines, reconstitute_only_these=changed_lines)
        self.host.filesystem.write_text_file(path, new_file_contents)

    def _list_deleted_tests(self):
        """Returns a list of layout tests that have been deleted."""
        out = self.check_run([
            'git', 'diff', 'origin/master', '-M100%', '--diff-filter=D',
            '--name-only'
        ])
        candidates = (self.finder.layout_test_name(line)
                      for line in out.splitlines())
        return [test for test in candidates if test]

    def _list_renamed_tests(self):
        """Returns a dict mapping source to dest name for layout tests that have been renamed."""
        out = self.check_run([
            'git', 'diff', 'origin/master', '-M100%', '--diff-filter=R',
            '--name-status'
        ])
        renamed = {}
        for line in out.splitlines():
            # --name-status with -M emits "<status>\t<old>\t<new>" per rename.
            _, old_path, new_path = line.split()
            old_test = self.finder.layout_test_name(old_path)
            new_test = self.finder.layout_test_name(new_path)
            if old_test and new_test:
                renamed[old_test] = new_test
        return renamed
Exemple #19
0
class DepsUpdater(object):
    """Updates the Chromium checkout's imported W3C test repositories.

    Clones web-platform-tests or csswg-test, copies the tests into
    LayoutTests/imported, updates TestExpectations for deleted/renamed
    tests, commits the result, and can optionally upload a CL and drive
    it through the commit queue.
    """

    def __init__(self, host):
        self.host = host
        self.executive = host.executive
        self.fs = host.filesystem
        self.finder = WebKitFinder(self.fs)
        self.verbose = False
        self.git_cl = None

    def main(self, argv=None):
        """Runs the import; returns 0 on success, 1 on failure."""
        options = self.parse_args(argv)
        self.verbose = options.verbose

        if not self.checkout_is_okay(options.allow_local_commits):
            return 1

        self.git_cl = GitCL(self.host, auth_refresh_token_json=options.auth_refresh_token_json)

        self.print_('## Noting the current Chromium commit.')
        _, show_ref_output = self.run(['git', 'show-ref', 'HEAD'])
        chromium_commitish = show_ref_output.split()[0]

        if options.target == 'wpt':
            import_commitish = self.update(WPT_DEST_NAME, WPT_REPO_URL, options.keep_w3c_repos_around, options.revision)
            self._copy_resources()
        elif options.target == 'css':
            import_commitish = self.update(CSS_DEST_NAME, CSS_REPO_URL, options.keep_w3c_repos_around, options.revision)
        else:
            raise AssertionError("Unsupported target %s" % options.target)

        has_changes = self.commit_changes_if_needed(chromium_commitish, import_commitish)
        if options.auto_update and has_changes:
            commit_successful = self.do_auto_update()
            if not commit_successful:
                return 1
        return 0

    def parse_args(self, argv):
        """Parses command-line arguments for the updater."""
        parser = argparse.ArgumentParser()
        parser.description = __doc__
        parser.add_argument('-v', '--verbose', action='store_true',
                            help='log what we are doing')
        parser.add_argument('--allow-local-commits', action='store_true',
                            help='allow script to run even if we have local commits')
        parser.add_argument('--keep-w3c-repos-around', action='store_true',
                            help='leave the w3c repos around that were imported previously.')
        parser.add_argument('-r', dest='revision', action='store',
                            help='Target revision.')
        parser.add_argument('target', choices=['css', 'wpt'],
                            help='Target repository.  "css" for csswg-test, "wpt" for web-platform-tests.')
        parser.add_argument('--auto-update', action='store_true',
                            help='uploads CL and initiates commit queue.')
        parser.add_argument('--auth-refresh-token-json',
                            help='Rietveld auth refresh JSON token.')
        return parser.parse_args(argv)

    def checkout_is_okay(self, allow_local_commits):
        """Returns True if the checkout is clean enough to run the import."""
        git_diff_retcode, _ = self.run(['git', 'diff', '--quiet', 'HEAD'], exit_on_failure=False)
        if git_diff_retcode:
            self.print_('## Checkout is dirty; aborting.')
            return False

        local_commits = self.run(['git', 'log', '--oneline', 'origin/master..HEAD'])[1]
        if local_commits and not allow_local_commits:
            self.print_('## Checkout has local commits; aborting. Use --allow-local-commits to allow this.')
            return False

        # A leftover temp clone from a previous run would collide with ours.
        if self.fs.exists(self.path_from_webkit_base(WPT_DEST_NAME)):
            self.print_('## WebKit/%s exists; aborting.' % WPT_DEST_NAME)
            return False

        if self.fs.exists(self.path_from_webkit_base(CSS_DEST_NAME)):
            self.print_('## WebKit/%s repo exists; aborting.' % CSS_DEST_NAME)
            return False

        return True

    def _copy_resources(self):
        """Copies resources from LayoutTests/resources to wpt and vice versa.

        There are resources from our repository that we use instead of the
        upstream versions. Conversely, there are also some resources that
        are copied in the other direction.

        Specifically:
          - testharnessreport.js contains code needed to integrate our testing
            with testharness.js; we also want our code to be used for tests
            in wpt.
          - TODO(qyearsley, jsbell): Document why the other files are copied,
            or stop copying them if it's unnecessary.

        If this method is changed, the lists of files expected to be identical
        in LayoutTests/PRESUBMIT.py should also be changed.
        """
        # TODO(tkent): resources_to_copy_to_wpt is unnecessary after enabling
        # WPTServe.
        resources_to_copy_to_wpt = [
            ('testharnessreport.js', 'resources'),
            ('WebIDLParser.js', 'resources'),
            ('vendor-prefix.js', 'common'),
        ]
        resources_to_copy_from_wpt = [
            ('idlharness.js', 'resources'),
            ('testharness.js', 'resources'),
        ]
        for filename, wpt_subdir in resources_to_copy_to_wpt:
            source = self.path_from_webkit_base('LayoutTests', 'resources', filename)
            destination = self.path_from_webkit_base('LayoutTests', 'imported', WPT_DEST_NAME, wpt_subdir, filename)
            self.copyfile(source, destination)
            self.run(['git', 'add', destination])
        for filename, wpt_subdir in resources_to_copy_from_wpt:
            source = self.path_from_webkit_base('LayoutTests', 'imported', WPT_DEST_NAME, wpt_subdir, filename)
            destination = self.path_from_webkit_base('LayoutTests', 'resources', filename)
            self.copyfile(source, destination)
            self.run(['git', 'add', destination])

    def _generate_manifest(self, original_repo_path, dest_path):
        """Generate MANIFEST.json for imported tests.

        Run 'manifest' command if it exists in original_repo_path, and
        add generated MANIFEST.json to dest_path.
        """
        manifest_command = self.fs.join(original_repo_path, 'manifest')
        if not self.fs.exists(manifest_command):
            # Do nothing for csswg-test.
            return
        self.print_('## Generating MANIFEST.json')
        self.run([manifest_command, '--tests-root', dest_path])
        self.run(['git', 'add', self.fs.join(dest_path, 'MANIFEST.json')])

    def update(self, dest_dir_name, url, keep_w3c_repos_around, revision):
        """Updates an imported repository.

        Args:
            dest_dir_name: The destination directory name.
            url: URL of the git repository.
            keep_w3c_repos_around: If True, the temp clone is not deleted.
            revision: Commit hash or None.

        Returns:
            A string for the commit description "<destination>@<commitish>".
        """
        temp_repo_path = self.path_from_webkit_base(dest_dir_name)
        self.print_('## Cloning %s into %s.' % (url, temp_repo_path))
        self.run(['git', 'clone', url, temp_repo_path])

        if revision is not None:
            self.print_('## Checking out %s' % revision)
            self.run(['git', 'checkout', revision], cwd=temp_repo_path)
        self.run(['git', 'submodule', 'update', '--init', '--recursive'], cwd=temp_repo_path)

        self.print_('## Noting the revision we are importing.')
        _, show_ref_output = self.run(['git', 'show-ref', 'origin/master'], cwd=temp_repo_path)
        master_commitish = show_ref_output.split()[0]

        self.print_('## Cleaning out tests from LayoutTests/imported/%s.' % dest_dir_name)
        dest_path = self.path_from_webkit_base('LayoutTests', 'imported', dest_dir_name)
        files_to_delete = self.fs.files_under(dest_path, file_filter=self.is_not_baseline)
        for subpath in files_to_delete:
            self.remove('LayoutTests', 'imported', subpath)

        self.print_('## Importing the tests.')
        src_repo = self.path_from_webkit_base(dest_dir_name)
        import_path = self.path_from_webkit_base('Tools', 'Scripts', 'import-w3c-tests')
        self.run([self.host.executable, import_path, '-d', 'imported', src_repo])

        self.run(['git', 'add', '--all', 'LayoutTests/imported/%s' % dest_dir_name])

        self.print_('## Deleting any orphaned baselines.')
        previous_baselines = self.fs.files_under(dest_path, file_filter=self.is_baseline)
        for subpath in previous_baselines:
            full_path = self.fs.join(dest_path, subpath)
            # A baseline is orphaned when no sibling test file matches it.
            if self.fs.glob(full_path.replace('-expected.txt', '*')) == [full_path]:
                self.fs.remove(full_path)

        self._generate_manifest(temp_repo_path, dest_path)
        if not keep_w3c_repos_around:
            self.print_('## Deleting temp repo directory %s.' % temp_repo_path)
            self.rmtree(temp_repo_path)

        self.print_('## Updating TestExpectations for any removed or renamed tests.')
        self.update_all_test_expectations_files(self._list_deleted_tests(), self._list_renamed_tests())

        return '%s@%s' % (dest_dir_name, master_commitish)

    def commit_changes_if_needed(self, chromium_commitish, import_commitish):
        """Commits the import if the working tree changed; returns True if so."""
        if self.run(['git', 'diff', '--quiet', 'HEAD'], exit_on_failure=False)[0]:
            self.print_('## Committing changes.')
            commit_msg = ('Import %s\n'
                          '\n'
                          'Using update-w3c-deps in Chromium %s.\n'
                          % (import_commitish, chromium_commitish))
            path_to_commit_msg = self.path_from_webkit_base('commit_msg')
            if self.verbose:
                self.print_('cat > %s <<EOF' % path_to_commit_msg)
                self.print_(commit_msg)
                self.print_('EOF')
            self.fs.write_text_file(path_to_commit_msg, commit_msg)
            self.run(['git', 'commit', '-a', '-F', path_to_commit_msg])
            self.remove(path_to_commit_msg)
            self.print_('## Done: changes imported and committed.')
            return True
        else:
            self.print_('## Done: no changes to import.')
            return False

    # Callback for FileSystem.files_under; not all arguments used - pylint: disable=unused-argument
    def is_baseline(self, fs, dirname, basename):
        return basename.endswith('-expected.txt')

    def is_not_baseline(self, fs, dirname, basename):
        return not self.is_baseline(fs, dirname, basename)

    def run(self, cmd, exit_on_failure=True, cwd=None):
        """Runs a command; returns (return_code, stdout).

        Exits the host process on failure unless exit_on_failure is False.
        """
        if self.verbose:
            self.print_(' '.join(cmd))

        cwd = cwd or self.finder.webkit_base()
        proc = self.executive.popen(cmd, stdout=self.executive.PIPE, stderr=self.executive.PIPE, cwd=cwd)
        out, err = proc.communicate()
        if proc.returncode or self.verbose:
            self.print_('# ret> %d' % proc.returncode)
            if out:
                for line in out.splitlines():
                    self.print_('# out> %s' % line)
            if err:
                for line in err.splitlines():
                    self.print_('# err> %s' % line)
        if exit_on_failure and proc.returncode:
            self.host.exit(proc.returncode)
        return proc.returncode, out

    def check_run(self, command):
        """Runs a command and returns its stdout, raising on failure."""
        return_code, out = self.run(command)
        if return_code:
            # Fixed: the original applied '%' only to ' '.join(command),
            # which raised TypeError instead of the intended message.
            raise Exception('%s failed with exit code %d.' % (' '.join(command), return_code))
        return out

    def copyfile(self, source, destination):
        if self.verbose:
            self.print_('cp %s %s' % (source, destination))
        self.fs.copyfile(source, destination)

    def remove(self, *comps):
        dest = self.path_from_webkit_base(*comps)
        if self.verbose:
            self.print_('rm %s' % dest)
        self.fs.remove(dest)

    def rmtree(self, *comps):
        dest = self.path_from_webkit_base(*comps)
        if self.verbose:
            self.print_('rm -fr %s' % dest)
        self.fs.rmtree(dest)

    def path_from_webkit_base(self, *comps):
        return self.finder.path_from_webkit_base(*comps)

    def print_(self, msg):
        self.host.print_(msg)

    def do_auto_update(self):
        """Attempts to upload a CL, make any required adjustments, and commit.

        This function assumes that the imported repo has already been updated,
        and that change has been committed. There may be newly-failing tests,
        so before being able to commit these new changes, we may need to update
        TestExpectations or download new baselines.

        Returns:
            True if successfully committed, False otherwise.
        """
        self._upload_cl()
        self.print_('## ' + self.git_cl.run(['issue']).strip())

        # First try: if there are failures, update expectations.
        self.print_('## Triggering try jobs.')
        for try_bot in self.host.builders.all_try_builder_names():
            self.git_cl.run(['try', '-b', try_bot])
        try_results = self.git_cl.wait_for_try_jobs()
        if not try_results:
            self.print_('## Timed out waiting for try results.')
            # Fixed: was a bare `return` (None); return False explicitly for
            # consistency with the other failure paths.
            return False
        if try_results and self.git_cl.has_failing_try_results(try_results):
            self.fetch_new_expectations_and_baselines()

        # Second try: if there are failures, then abort.
        self.git_cl.run(['set-commit', '--rietveld'])
        try_results = self.git_cl.wait_for_try_jobs()
        if not try_results:
            self.print_('Timed out waiting for try results.')
            self.git_cl.run(['set-close'])
            return False
        if self.git_cl.has_failing_try_results(try_results):
            self.print_('CQ failed; aborting.')
            self.git_cl.run(['set-close'])
            return False
        self.print_('## Update completed.')
        return True

    def _upload_cl(self):
        self.print_('## Uploading change list.')
        cc_list = self.get_directory_owners_to_cc()
        last_commit_message = self.check_run(['git', 'log', '-1', '--format=%B'])
        commit_message = last_commit_message + '[email protected]'
        self.git_cl.run([
            'upload',
            '-f',
            '--rietveld',
            '-m',
            commit_message,
        ] + ['--cc=' + email for email in cc_list])

    def get_directory_owners_to_cc(self):
        """Returns a list of email addresses to CC for the current import."""
        self.print_('## Gathering directory owners emails to CC.')
        directory_owners_file_path = self.finder.path_from_webkit_base(
            'Tools', 'Scripts', 'webkitpy', 'w3c', 'directory_owners.json')
        with open(directory_owners_file_path) as data_file:
            directory_to_owner = self.parse_directory_owners(json.load(data_file))
        out = self.check_run(['git', 'diff', 'origin/master', '--name-only'])
        changed_files = out.splitlines()
        return self.generate_email_list(changed_files, directory_to_owner)

    @staticmethod
    def parse_directory_owners(decoded_data_file):
        """Builds a directory -> notification-email dict, skipping empty emails."""
        directory_dict = {}
        for dict_set in decoded_data_file:
            if dict_set['notification-email']:
                directory_dict[dict_set['directory']] = dict_set['notification-email']
        return directory_dict

    def generate_email_list(self, changed_files, directory_to_owner):
        """Returns a list of email addresses based on the given file list and
        directory-to-owner mapping.

        Args:
            changed_files: A list of file paths relative to the repository root.
            directory_to_owner: A dict mapping layout test directories to emails.

        Returns:
            A list of the email addresses to be notified for the current import.
        """
        email_addresses = set()
        for file_path in changed_files:
            test_path = self.finder.layout_test_name(file_path)
            if test_path is None:
                continue
            test_dir = self.fs.dirname(test_path)
            if test_dir in directory_to_owner:
                email_addresses.add(directory_to_owner[test_dir])
        return sorted(email_addresses)

    def fetch_new_expectations_and_baselines(self):
        """Adds new expectations and downloads baselines based on try job results, then commits and uploads the change."""
        self.print_('## Adding test expectations lines to LayoutTests/TestExpectations.')
        script_path = self.path_from_webkit_base('Tools', 'Scripts', 'update-w3c-test-expectations')
        self.run([self.host.executable, script_path, '--verbose'])
        message = 'Modify TestExpectations or download new baselines for tests.'
        self.check_run(['git', 'commit', '-a', '-m', message])
        self.git_cl.run(['upload', '-m', message, '--rietveld'])

    def update_all_test_expectations_files(self, deleted_tests, renamed_tests):
        """Updates all test expectations files for tests that have been deleted or renamed."""
        port = self.host.port_factory.get()
        for path, file_contents in port.all_expectations_dict().iteritems():

            parser = TestExpectationParser(port, all_tests=None, is_lint_mode=False)
            expectation_lines = parser.parse(path, file_contents)
            self._update_single_test_expectations_file(path, expectation_lines, deleted_tests, renamed_tests)

    def _update_single_test_expectations_file(self, path, expectation_lines, deleted_tests, renamed_tests):
        """Updates single test expectations file."""
        # FIXME: This won't work for removed or renamed directories with test expectations
        # that are directories rather than individual tests.
        new_lines = []
        changed_lines = []
        for expectation_line in expectation_lines:
            if expectation_line.name in deleted_tests:
                continue
            if expectation_line.name in renamed_tests:
                expectation_line.name = renamed_tests[expectation_line.name]
                # Upon parsing the file, a "path does not exist" warning is expected
                # to be there for tests that have been renamed, and if there are warnings,
                # then the original string is used. If the warnings are reset, then the
                # expectation line is re-serialized when output.
                expectation_line.warnings = []
                changed_lines.append(expectation_line)
            new_lines.append(expectation_line)
        new_file_contents = TestExpectations.list_to_string(new_lines, reconstitute_only_these=changed_lines)
        self.host.filesystem.write_text_file(path, new_file_contents)

    def _list_deleted_tests(self):
        """Returns a list of layout tests that have been deleted."""
        out = self.check_run(['git', 'diff', 'origin/master', '-M100%', '--diff-filter=D', '--name-only'])
        deleted_tests = []
        for line in out.splitlines():
            test = self.finder.layout_test_name(line)
            if test:
                deleted_tests.append(test)
        return deleted_tests

    def _list_renamed_tests(self):
        """Returns a dict mapping source to dest name for layout tests that have been renamed."""
        out = self.check_run(['git', 'diff', 'origin/master', '-M100%', '--diff-filter=R', '--name-status'])
        renamed_tests = {}
        for line in out.splitlines():
            _, source_path, dest_path = line.split()
            source_test = self.finder.layout_test_name(source_path)
            dest_test = self.finder.layout_test_name(dest_path)
            if source_test and dest_test:
                renamed_tests[source_test] = dest_test
        return renamed_tests
Exemple #20
0
class TestCopier(object):
    """Copies tests from a local W3C repository checkout into LayoutTests.

    Files are optionally converted for Blink (prefixing CSS properties,
    adjusting paths) on the way; see copy_file/should_try_to_convert.
    """

    def __init__(self, host, source_repo_path, dest_dir_name='external'):
        """Initializes variables to prepare for copying and converting files.

        Args:
            host: An instance of Host.
            source_repo_path: Path to the local checkout of a
                web-platform-tests or csswg-test repository.
            dest_dir_name: The name of the directory under the layout tests
                directory where imported tests should be copied to.
                TODO(qyearsley): This can be made into a constant.
        """
        self.host = host

        assert self.host.filesystem.exists(source_repo_path)
        self.source_repo_path = source_repo_path
        self.dest_dir_name = dest_dir_name

        self.filesystem = self.host.filesystem
        self.webkit_finder = WebKitFinder(self.filesystem)
        self._webkit_root = self.webkit_finder.webkit_base()
        self.layout_tests_dir = self.webkit_finder.path_from_webkit_base('LayoutTests')
        self.destination_directory = self.filesystem.normpath(
            self.filesystem.join(
                self.layout_tests_dir,
                dest_dir_name,
                self.filesystem.basename(self.source_repo_path)))
        # When the checkout already lives at the destination, skipped files
        # are pruned in place instead of being copied.
        self.import_in_place = (self.source_repo_path == self.destination_directory)
        self.dir_above_repo = self.filesystem.dirname(self.source_repo_path)
        self.is_wpt = self.filesystem.basename(self.source_repo_path) == 'wpt'

        self.import_list = []

        # This is just a FYI list of CSS properties that still need to be prefixed,
        # which may be output after importing.
        self._prefixed_properties = {}

    def do_import(self):
        """Finds importable tests, then copies/converts them to the destination."""
        _log.info('Importing %s into %s', self.source_repo_path, self.destination_directory)
        self.find_importable_tests()
        self.import_tests()

    def find_importable_tests(self):
        """Walks through the source directory to find what tests should be imported.

        This function sets self.import_list, which contains information about how many
        tests are being imported, and their source and destination paths.
        """
        paths_to_skip = self.find_paths_to_skip()

        # These tuples are invariant across the walk, so build them once.
        # Files in 'tools' are not for browser testing, so we skip them.
        # See: http://web-platform-tests.org/writing-tests/general-guidelines.html#tools
        dirs_to_skip = ('.git', 'test-plan', 'tools')

        # We copy all files in 'support', including HTML without metadata.
        # See: http://web-platform-tests.org/writing-tests/general-guidelines.html#support-files
        dirs_to_include = ('resources', 'support')

        for root, dirs, files in self.filesystem.walk(self.source_repo_path):
            cur_dir = root.replace(self.dir_above_repo + '/', '') + '/'
            _log.debug('Scanning %s...', cur_dir)
            total_tests = 0
            reftests = 0
            jstests = 0

            if dirs:
                # Mutating |dirs| in place prunes the walk below this root.
                for name in dirs_to_skip:
                    if name in dirs:
                        dirs.remove(name)

                for path in paths_to_skip:
                    path_base = path.replace(self.dest_dir_name + '/', '')
                    path_base = path_base.replace(cur_dir, '')
                    path_full = self.filesystem.join(root, path_base)
                    if path_base in dirs:
                        _log.info('Skipping: %s', path_full)
                        dirs.remove(path_base)
                        if self.import_in_place:
                            self.filesystem.rmtree(path_full)

            copy_list = []

            for filename in files:
                path_full = self.filesystem.join(root, filename)
                path_base = path_full.replace(self.source_repo_path + '/', '')
                path_base = self.destination_directory.replace(self.layout_tests_dir + '/', '') + '/' + path_base
                if path_base in paths_to_skip:
                    # Skipped files are never copied; when importing in place
                    # they are additionally pruned from the checkout.
                    if self.import_in_place:
                        _log.debug('Pruning: %s', path_base)
                        self.filesystem.remove(path_full)
                    continue
                # FIXME: This block should really be a separate function, but the early-continues make that difficult.

                # TODO(qyearsley): Remove the below block.
                if filename != '.gitignore' and (filename.startswith('.') or filename.endswith('.pl')):
                    _log.debug('Skipping: %s', path_full)
                    _log.debug('  Reason: Hidden files and perl scripts are not necessary.')
                    continue

                if filename == 'OWNERS' or filename == 'reftest.list':
                    # See http://crbug.com/584660 and http://crbug.com/582838.
                    _log.debug('Skipping: %s', path_full)
                    _log.debug('  Reason: This file may cause Chromium presubmit to fail.')
                    continue

                # Non-HTML/XML files can never be reftests or JS tests, so
                # they are copied without parsing.
                mimetype = mimetypes.guess_type(path_full)
                if ('html' not in str(mimetype[0]) and
                        'application/xhtml+xml' not in str(mimetype[0]) and
                        'application/xml' not in str(mimetype[0])):
                    copy_list.append({'src': path_full, 'dest': filename})
                    continue

                if self.filesystem.basename(root) in dirs_to_include:
                    copy_list.append({'src': path_full, 'dest': filename})
                    continue

                test_parser = TestParser(path_full, self.host)
                test_info = test_parser.analyze_test()
                if test_info is None:
                    copy_list.append({'src': path_full, 'dest': filename})
                    continue

                if 'reference' in test_info:
                    ref_path_full = test_info['reference']
                    if not self.filesystem.exists(ref_path_full):
                        _log.warning('Skipping: %s', path_full)
                        _log.warning('  Reason: Ref file "%s" was not found.', ref_path_full)
                        continue

                    if not self.is_wpt:
                        # For csswg-test, we still need to add a ref file
                        # using WebKit naming conventions. See crbug.com/268729.
                        # FIXME: Remove this when csswg-test is merged into wpt.
                        test_basename = self.filesystem.basename(test_info['test'])
                        ref_file = self.filesystem.splitext(test_basename)[0] + '-expected'
                        ref_file += self.filesystem.splitext(ref_path_full)[1]
                        copy_list.append({
                            'src': test_info['reference'],
                            'dest': ref_file,
                            'reference_support_info': test_info['reference_support_info'],
                        })

                    reftests += 1
                    total_tests += 1
                    copy_list.append({'src': test_info['test'], 'dest': filename})

                elif 'jstest' in test_info:
                    jstests += 1
                    total_tests += 1
                    copy_list.append({'src': path_full, 'dest': filename, 'is_jstest': True})

            if copy_list:
                # Only add this directory to the list if there's something to import
                self.import_list.append({'dirname': root, 'copy_list': copy_list,
                                         'reftests': reftests, 'jstests': jstests, 'total_tests': total_tests})

    def find_paths_to_skip(self):
        """Reads W3CImportExpectations and returns the set of paths marked SKIP."""
        paths_to_skip = set()
        port = self.host.port_factory.get()
        w3c_import_expectations_path = self.webkit_finder.path_from_webkit_base('LayoutTests', 'W3CImportExpectations')
        w3c_import_expectations = self.filesystem.read_text_file(w3c_import_expectations_path)
        parser = TestExpectationParser(port, all_tests=(), is_lint_mode=False)
        expectation_lines = parser.parse(w3c_import_expectations_path, w3c_import_expectations)
        for line in expectation_lines:
            if 'SKIP' in line.expectations:
                # Lines with specifiers (e.g. platform modifiers) are not
                # meaningful in this file; warn and ignore them.
                if line.specifiers:
                    _log.warning('W3CImportExpectations:%s should not have any specifiers', line.line_numbers)
                    continue
                paths_to_skip.add(line.name)
        return paths_to_skip

    def import_tests(self):
        """Reads |self.import_list|, and converts and copies files to their destination."""
        total_imported_tests = 0
        total_imported_reftests = 0
        total_imported_jstests = 0

        for dir_to_copy in self.import_list:
            total_imported_tests += dir_to_copy['total_tests']
            total_imported_reftests += dir_to_copy['reftests']
            total_imported_jstests += dir_to_copy['jstests']

            if not dir_to_copy['copy_list']:
                continue

            orig_path = dir_to_copy['dirname']

            relative_dir = self.filesystem.relpath(orig_path, self.source_repo_path)
            dest_dir = self.filesystem.join(self.destination_directory, relative_dir)

            if not self.filesystem.exists(dest_dir):
                self.filesystem.maybe_make_directory(dest_dir)

            # copy_file returns the copied path (or None), but nothing here
            # consumes the result.
            for file_to_copy in dir_to_copy['copy_list']:
                self.copy_file(file_to_copy, dest_dir)

        _log.info('')
        _log.info('Import complete')
        _log.info('')
        _log.info('IMPORTED %d TOTAL TESTS', total_imported_tests)
        _log.info('Imported %d reftests', total_imported_reftests)
        _log.info('Imported %d JS tests', total_imported_jstests)
        _log.info('Imported %d pixel/manual tests', total_imported_tests - total_imported_jstests - total_imported_reftests)
        _log.info('')

        if self._prefixed_properties:
            _log.info('Properties needing prefixes (by count):')
            for prefixed_property in sorted(self._prefixed_properties, key=lambda p: self._prefixed_properties[p]):
                _log.info('  %s: %s', prefixed_property, self._prefixed_properties[prefixed_property])

    def copy_file(self, file_to_copy, dest_dir):
        """Converts and copies a file, if it should be copied.

        Args:
            file_to_copy: A dict in a file copy list constructed by
                find_importable_tests, which represents one file to copy, including
                the keys:
                    "src": Absolute path to the source location of the file.
                    "dest": File name of the destination file.
                And possibly also the keys "reference_support_info" or "is_jstest".
            dest_dir: Path to the directory where the file should be copied.

        Returns:
            The path to the new file, relative to the Blink root (//third_party/WebKit),
            or None if the source was missing or a directory.
        """
        source_path = self.filesystem.normpath(file_to_copy['src'])
        dest_path = self.filesystem.join(dest_dir, file_to_copy['dest'])

        if self.filesystem.isdir(source_path):
            _log.error('%s refers to a directory', source_path)
            return None

        if not self.filesystem.exists(source_path):
            _log.error('%s not found. Possible error in the test.', source_path)
            return None

        reference_support_info = file_to_copy.get('reference_support_info') or None

        if not self.filesystem.exists(self.filesystem.dirname(dest_path)):
            if not self.import_in_place:
                self.filesystem.maybe_make_directory(self.filesystem.dirname(dest_path))

        relpath = self.filesystem.relpath(dest_path, self.layout_tests_dir)
        # FIXME: Maybe doing a file diff is in order here for existing files?
        # In other words, there's no sense in overwriting identical files, but
        # there's no harm in copying the identical thing.
        _log.debug('  copying %s', relpath)

        if self.should_try_to_convert(file_to_copy, source_path, dest_dir):
            converted_file = convert_for_webkit(
                dest_dir, filename=source_path,
                reference_support_info=reference_support_info,
                host=self.host)
            # converted_file is (prefixed properties used, converted text).
            for prefixed_property in converted_file[0]:
                self._prefixed_properties.setdefault(prefixed_property, 0)
                self._prefixed_properties[prefixed_property] += 1

            self.filesystem.write_text_file(dest_path, converted_file[1])
        else:
            if not self.import_in_place:
                self.filesystem.copyfile(source_path, dest_path)
                # Preserve the executable bit on scripts. The shebang check is
                # done against bytes (b'#!' == '#!' on Python 2) so it also
                # works when read_binary_file returns bytes on Python 3.
                if self.filesystem.read_binary_file(source_path)[:2] == b'#!':
                    self.filesystem.make_executable(dest_path)

        return dest_path.replace(self._webkit_root, '')

    @staticmethod
    def should_try_to_convert(file_to_copy, source_path, dest_dir):
        """Checks whether we should try to modify the file when importing."""
        if file_to_copy.get('is_jstest', False):
            return False

        # Conversion is not necessary for any tests in wpt now; see http://crbug.com/654081.
        # Note, we want to move away from converting files, see http://crbug.com/663773.
        if re.search(r'[/\\]external[/\\]wpt[/\\]', dest_dir):
            return False

        # Only HTML, XHTML and CSS files should be converted.
        mimetype, _ = mimetypes.guess_type(source_path)
        return mimetype in ('text/html', 'application/xhtml+xml', 'text/css')
Exemple #21
0
class DepsUpdater(object):
    """Updates the imported web-platform-tests/csswg-test copies in LayoutTests.

    Clones the upstream repo, re-runs the importer, fixes up TestExpectations
    for deleted/renamed tests, and can optionally upload and auto-commit a CL.
    """

    def __init__(self, host):
        self.host = host
        self.executive = host.executive
        self.fs = host.filesystem
        self.finder = WebKitFinder(self.fs)
        self.verbose = False
        self.git_cl = None

    def main(self, argv=None):
        """Entry point; returns a process exit code (0 on success)."""
        options = self.parse_args(argv)
        self.verbose = options.verbose

        if not self.checkout_is_okay(options.allow_local_commits):
            return 1

        self.git_cl = GitCL(self.host, auth_refresh_token_json=options.auth_refresh_token_json)

        self.print_("## Noting the current Chromium commit.")
        _, show_ref_output = self.run(["git", "show-ref", "HEAD"])
        chromium_commitish = show_ref_output.split()[0]

        if options.target == "wpt":
            import_commitish = self.update(WPT_DEST_NAME, WPT_REPO_URL, options.keep_w3c_repos_around, options.revision)
            self._copy_resources()
        elif options.target == "css":
            import_commitish = self.update(CSS_DEST_NAME, CSS_REPO_URL, options.keep_w3c_repos_around, options.revision)
        else:
            raise AssertionError("Unsupported target %s" % options.target)

        has_changes = self.commit_changes_if_needed(chromium_commitish, import_commitish)
        if options.auto_update and has_changes:
            commit_successful = self.do_auto_update()
            if not commit_successful:
                return 1
        return 0

    def parse_args(self, argv):
        """Parses and returns the command-line options."""
        parser = argparse.ArgumentParser()
        parser.description = __doc__
        parser.add_argument("-v", "--verbose", action="store_true", help="log what we are doing")
        parser.add_argument(
            "--allow-local-commits", action="store_true", help="allow script to run even if we have local commits"
        )
        parser.add_argument(
            "--keep-w3c-repos-around",
            action="store_true",
            help="leave the w3c repos around that were imported previously.",
        )
        parser.add_argument("-r", dest="revision", action="store", help="Target revision.")
        parser.add_argument(
            "target",
            choices=["css", "wpt"],
            help='Target repository.  "css" for csswg-test, "wpt" for web-platform-tests.',
        )
        parser.add_argument("--auto-update", action="store_true", help="uploads CL and initiates commit queue.")
        parser.add_argument("--auth-refresh-token-json", help="Rietveld auth refresh JSON token.")
        return parser.parse_args(argv)

    def checkout_is_okay(self, allow_local_commits):
        """Returns False if the checkout is dirty, has local commits, or has stale temp repos."""
        git_diff_retcode, _ = self.run(["git", "diff", "--quiet", "HEAD"], exit_on_failure=False)
        if git_diff_retcode:
            self.print_("## Checkout is dirty; aborting.")
            return False

        local_commits = self.run(["git", "log", "--oneline", "origin/master..HEAD"])[1]
        if local_commits and not allow_local_commits:
            self.print_("## Checkout has local commits; aborting. Use --allow-local-commits to allow this.")
            return False

        if self.fs.exists(self.path_from_webkit_base(WPT_DEST_NAME)):
            self.print_("## WebKit/%s exists; aborting." % WPT_DEST_NAME)
            return False

        if self.fs.exists(self.path_from_webkit_base(CSS_DEST_NAME)):
            self.print_("## WebKit/%s repo exists; aborting." % CSS_DEST_NAME)
            return False

        return True

    def _copy_resources(self):
        """Copies resources from LayoutTests/resources to wpt and vice versa.

        There are resources from our repository that we use instead of the
        upstream versions. Conversely, there are also some resources that
        are copied in the other direction.

        Specifically:
          - testharnessreport.js contains code needed to integrate our testing
            with testharness.js; we also want our code to be used for tests
            in wpt.
          - TODO(qyearsley, jsbell): Document why other other files are copied,
            or stop copying them if it's unnecessary.

        If this method is changed, the lists of files expected to be identical
        in LayoutTests/PRESUBMIT.py should also be changed.
        """
        resources_to_copy_to_wpt = [
            ("testharnessreport.js", "resources"),
            ("WebIDLParser.js", "resources"),
            ("vendor-prefix.js", "common"),
        ]
        resources_to_copy_from_wpt = [("idlharness.js", "resources"), ("testharness.js", "resources")]
        for filename, wpt_subdir in resources_to_copy_to_wpt:
            source = self.path_from_webkit_base("LayoutTests", "resources", filename)
            destination = self.path_from_webkit_base("LayoutTests", "imported", WPT_DEST_NAME, wpt_subdir, filename)
            self.copyfile(source, destination)
            self.run(["git", "add", destination])
        for filename, wpt_subdir in resources_to_copy_from_wpt:
            source = self.path_from_webkit_base("LayoutTests", "imported", WPT_DEST_NAME, wpt_subdir, filename)
            destination = self.path_from_webkit_base("LayoutTests", "resources", filename)
            self.copyfile(source, destination)
            self.run(["git", "add", destination])

    def update(self, dest_dir_name, url, keep_w3c_repos_around, revision):
        """Updates an imported repository.

        Args:
            dest_dir_name: The destination directory name.
            url: URL of the git repository.
            keep_w3c_repos_around: If True, the cloned temp repo is not deleted.
            revision: Commit hash or None.

        Returns:
            A string for the commit description "<destination>@<commitish>".
        """
        temp_repo_path = self.path_from_webkit_base(dest_dir_name)
        self.print_("## Cloning %s into %s." % (url, temp_repo_path))
        self.run(["git", "clone", url, temp_repo_path])

        if revision is not None:
            self.print_("## Checking out %s" % revision)
            self.run(["git", "checkout", revision], cwd=temp_repo_path)
        self.run(["git", "submodule", "update", "--init", "--recursive"], cwd=temp_repo_path)

        self.print_("## Noting the revision we are importing.")
        _, show_ref_output = self.run(["git", "show-ref", "origin/master"], cwd=temp_repo_path)
        master_commitish = show_ref_output.split()[0]

        self.print_("## Cleaning out tests from LayoutTests/imported/%s." % dest_dir_name)
        dest_path = self.path_from_webkit_base("LayoutTests", "imported", dest_dir_name)
        files_to_delete = self.fs.files_under(dest_path, file_filter=self.is_not_baseline)
        for subpath in files_to_delete:
            self.remove("LayoutTests", "imported", subpath)

        self.print_("## Importing the tests.")
        src_repo = self.path_from_webkit_base(dest_dir_name)
        import_path = self.path_from_webkit_base("Tools", "Scripts", "import-w3c-tests")
        self.run([self.host.executable, import_path, "-d", "imported", src_repo])

        self.run(["git", "add", "--all", "LayoutTests/imported/%s" % dest_dir_name])

        self.print_("## Deleting manual tests.")
        files_to_delete = self.fs.files_under(dest_path, file_filter=self.is_manual_test)
        for subpath in files_to_delete:
            self.remove("LayoutTests", "imported", subpath)

        self.print_("## Deleting any orphaned baselines.")
        previous_baselines = self.fs.files_under(dest_path, file_filter=self.is_baseline)
        for subpath in previous_baselines:
            full_path = self.fs.join(dest_path, subpath)
            # A baseline is orphaned when the glob finds nothing but the
            # baseline itself (i.e. the test it belonged to is gone).
            if self.fs.glob(full_path.replace("-expected.txt", "*")) == [full_path]:
                self.fs.remove(full_path)

        if not keep_w3c_repos_around:
            self.print_("## Deleting temp repo directory %s." % temp_repo_path)
            self.rmtree(temp_repo_path)

        self.print_("## Updating TestExpectations for any removed or renamed tests.")
        self.update_all_test_expectations_files(self._list_deleted_tests(), self._list_renamed_tests())

        return "%s@%s" % (dest_dir_name, master_commitish)

    def commit_changes_if_needed(self, chromium_commitish, import_commitish):
        """Commits the import if the working tree has changes; returns True if committed."""
        if self.run(["git", "diff", "--quiet", "HEAD"], exit_on_failure=False)[0]:
            self.print_("## Committing changes.")
            commit_msg = (
                "Import %s\n" "\n" "Using update-w3c-deps in Chromium %s.\n" % (import_commitish, chromium_commitish)
            )
            path_to_commit_msg = self.path_from_webkit_base("commit_msg")
            if self.verbose:
                self.print_("cat > %s <<EOF" % path_to_commit_msg)
                self.print_(commit_msg)
                self.print_("EOF")
            self.fs.write_text_file(path_to_commit_msg, commit_msg)
            self.run(["git", "commit", "-a", "-F", path_to_commit_msg])
            self.remove(path_to_commit_msg)
            self.print_("## Done: changes imported and committed.")
            return True
        else:
            self.print_("## Done: no changes to import.")
            return False

    def is_manual_test(self, fs, dirname, basename):
        """Returns True if the file should be removed because it's a manual test.

        Tests with "-manual" in the name are not considered manual tests
        if there is a corresponding JS automation file.
        """
        basename_without_extension, _ = self.fs.splitext(basename)
        if not basename_without_extension.endswith("-manual"):
            return False
        dir_from_wpt = fs.relpath(dirname, self.path_from_webkit_base("LayoutTests", "imported", "wpt"))
        automation_dir = self.path_from_webkit_base("LayoutTests", "imported", "wpt_automation", dir_from_wpt)
        if fs.isfile(fs.join(automation_dir, "%s-automation.js" % basename_without_extension)):
            return False
        return True

    # Callback for FileSystem.files_under; not all arguments used - pylint: disable=unused-argument
    def is_baseline(self, fs, dirname, basename):
        return basename.endswith("-expected.txt")

    def is_not_baseline(self, fs, dirname, basename):
        return not self.is_baseline(fs, dirname, basename)

    def run(self, cmd, exit_on_failure=True, cwd=None):
        """Runs a command, echoing output when verbose or on failure.

        Returns:
            A (return code, stdout) pair.
        """
        if self.verbose:
            self.print_(" ".join(cmd))

        cwd = cwd or self.finder.webkit_base()
        proc = self.executive.popen(cmd, stdout=self.executive.PIPE, stderr=self.executive.PIPE, cwd=cwd)
        out, err = proc.communicate()
        if proc.returncode or self.verbose:
            self.print_("# ret> %d" % proc.returncode)
            if out:
                for line in out.splitlines():
                    self.print_("# out> %s" % line)
            if err:
                for line in err.splitlines():
                    self.print_("# err> %s" % line)
        if exit_on_failure and proc.returncode:
            self.host.exit(proc.returncode)
        return proc.returncode, out

    def check_run(self, command):
        """Runs a command and returns its stdout; raises if it fails."""
        return_code, out = self.run(command)
        if return_code:
            # The % operand must be a tuple; applying it to the joined command
            # alone would raise TypeError instead of the intended Exception.
            raise Exception("%s failed with exit code %d." % (" ".join(command), return_code))
        return out

    def copyfile(self, source, destination):
        if self.verbose:
            self.print_("cp %s %s" % (source, destination))
        self.fs.copyfile(source, destination)

    def remove(self, *comps):
        dest = self.path_from_webkit_base(*comps)
        if self.verbose:
            self.print_("rm %s" % dest)
        self.fs.remove(dest)

    def rmtree(self, *comps):
        dest = self.path_from_webkit_base(*comps)
        if self.verbose:
            self.print_("rm -fr %s" % dest)
        self.fs.rmtree(dest)

    def path_from_webkit_base(self, *comps):
        return self.finder.path_from_webkit_base(*comps)

    def print_(self, msg):
        self.host.print_(msg)

    def do_auto_update(self):
        """Attempts to upload a CL, make any required adjustments, and commit.

        This function assumes that the imported repo has already been updated,
        and that change has been committed. There may be newly-failing tests,
        so before being able to commit these new changes, we may need to update
        TestExpectations or download new baselines.

        Returns:
            True if successfully committed, False otherwise.
        """
        self._upload_cl()
        self.print_("## " + self.git_cl.run(["issue"]).strip())

        # First try: if there are failures, update expectations.
        self.print_("## Triggering try jobs.")
        for try_bot in self.host.builders.all_try_builder_names():
            self.git_cl.run(["try", "-b", try_bot])
        try_results = self.git_cl.wait_for_try_jobs()
        if not try_results:
            self.print_("## Timed out waiting for try results.")
            # Return False explicitly to match the documented bool contract.
            return False
        if try_results and self.git_cl.has_failing_try_results(try_results):
            self.fetch_new_expectations_and_baselines()

        # Second try: if there are failures, then abort.
        self.git_cl.run(["set-commit", "--rietveld"])
        try_results = self.git_cl.wait_for_try_jobs()
        if not try_results:
            self.print_("Timed out waiting for try results.")
            self.git_cl.run(["set-close"])
            return False
        if self.git_cl.has_failing_try_results(try_results):
            self.print_("CQ failed; aborting.")
            self.git_cl.run(["set-close"])
            return False
        self.print_("## Update completed.")
        return True

    def _upload_cl(self):
        self.print_("## Uploading change list.")
        cc_list = self.get_directory_owners_to_cc()
        last_commit_message = self.check_run(["git", "log", "-1", "--format=%B"])
        commit_message = last_commit_message + "[email protected]"
        self.git_cl.run(["upload", "-f", "--rietveld", "-m", commit_message] + ["--cc=" + email for email in cc_list])

    def get_directory_owners_to_cc(self):
        """Returns a list of email addresses to CC for the current import."""
        self.print_("## Gathering directory owners emails to CC.")
        directory_owners_file_path = self.finder.path_from_webkit_base(
            "Tools", "Scripts", "webkitpy", "w3c", "directory_owners.json"
        )
        with open(directory_owners_file_path) as data_file:
            directory_to_owner = self.parse_directory_owners(json.load(data_file))
        out = self.check_run(["git", "diff", "origin/master", "--name-only"])
        changed_files = out.splitlines()
        return self.generate_email_list(changed_files, directory_to_owner)

    @staticmethod
    def parse_directory_owners(decoded_data_file):
        """Maps directory -> notification email, dropping entries without an email."""
        directory_dict = {}
        for dict_set in decoded_data_file:
            if dict_set["notification-email"]:
                directory_dict[dict_set["directory"]] = dict_set["notification-email"]
        return directory_dict

    def generate_email_list(self, changed_files, directory_to_owner):
        """Returns a list of email addresses based on the given file list and
        directory-to-owner mapping.

        Args:
            changed_files: A list of file paths relative to the repository root.
            directory_to_owner: A dict mapping layout test directories to emails.

        Returns:
            A list of the email addresses to be notified for the current import.
        """
        email_addresses = set()
        for file_path in changed_files:
            test_path = self.finder.layout_test_name(file_path)
            if test_path is None:
                continue
            test_dir = self.fs.dirname(test_path)
            if test_dir in directory_to_owner:
                email_addresses.add(directory_to_owner[test_dir])
        return sorted(email_addresses)

    def fetch_new_expectations_and_baselines(self):
        """Adds new expectations and downloads baselines based on try job results, then commits and uploads the change."""
        self.print_("## Adding test expectations lines to LayoutTests/TestExpectations.")
        script_path = self.path_from_webkit_base("Tools", "Scripts", "update-w3c-test-expectations")
        self.run([self.host.executable, script_path, "--verbose"])
        message = "Modify TestExpectations or download new baselines for tests."
        self.check_run(["git", "commit", "-a", "-m", message])
        self.git_cl.run(["upload", "-m", message, "--rietveld"])

    def update_all_test_expectations_files(self, deleted_tests, renamed_tests):
        """Updates all test expectations files for tests that have been deleted or renamed."""
        port = self.host.port_factory.get()
        # items() iterates identically to iteritems() here and also works on
        # Python 3.
        for path, file_contents in port.all_expectations_dict().items():

            parser = TestExpectationParser(port, all_tests=None, is_lint_mode=False)
            expectation_lines = parser.parse(path, file_contents)
            self._update_single_test_expectations_file(path, expectation_lines, deleted_tests, renamed_tests)

    def _update_single_test_expectations_file(self, path, expectation_lines, deleted_tests, renamed_tests):
        """Updates single test expectations file."""
        # FIXME: This won't work for removed or renamed directories with test expectations
        # that are directories rather than individual tests.
        new_lines = []
        changed_lines = []
        for expectation_line in expectation_lines:
            if expectation_line.name in deleted_tests:
                continue
            if expectation_line.name in renamed_tests:
                expectation_line.name = renamed_tests[expectation_line.name]
                # Upon parsing the file, a "path does not exist" warning is expected
                # to be there for tests that have been renamed, and if there are warnings,
                # then the original string is used. If the warnings are reset, then the
                # expectation line is re-serialized when output.
                expectation_line.warnings = []
                changed_lines.append(expectation_line)
            new_lines.append(expectation_line)
        new_file_contents = TestExpectations.list_to_string(new_lines, reconstitute_only_these=changed_lines)
        self.host.filesystem.write_text_file(path, new_file_contents)

    def _list_deleted_tests(self):
        """Returns a list of layout tests that have been deleted."""
        out = self.check_run(["git", "diff", "origin/master", "--diff-filter=D", "--name-only"])
        deleted_tests = []
        for line in out.splitlines():
            test = self.finder.layout_test_name(line)
            if test:
                deleted_tests.append(test)
        return deleted_tests

    def _list_renamed_tests(self):
        """Returns a dict mapping source to dest name for layout tests that have been renamed."""
        out = self.check_run(["git", "diff", "origin/master", "--diff-filter=R", "--name-status"])
        renamed_tests = {}
        for line in out.splitlines():
            _, source_path, dest_path = line.split()
            source_test = self.finder.layout_test_name(source_path)
            dest_test = self.finder.layout_test_name(dest_path)
            if source_test and dest_test:
                renamed_tests[source_test] = dest_test
        return renamed_tests
Exemple #22
0
    def generate_manifest(host, dest_path):
        """Generates MANIFEST.json on the specified directory.

        Args:
            host: An instance of Host; its executive runs the manifest tool.
            dest_path: Path to the tests root to generate the manifest for.

        Returns:
            A (return code, stdout) pair from the manifest command. On failure
            the output is logged and host.exit is called with the return code.
        """
        executive = host.executive
        finder = WebKitFinder(host.filesystem)
        manifest_exec_path = finder.path_from_webkit_base('Tools', 'Scripts', 'webkitpy', 'thirdparty', 'wpt', 'wpt', 'manifest')

        cmd = ['python', manifest_exec_path, '--work', '--tests-root', dest_path]
        _log.debug('Running command: %s', ' '.join(cmd))
        proc = executive.popen(cmd, stdout=executive.PIPE, stderr=executive.PIPE, stdin=executive.PIPE, cwd=finder.webkit_base())
        out, err = proc.communicate('')
        if proc.returncode:
            # Use lazy %-style args (not eager % formatting) to match the
            # logging convention used elsewhere in this file.
            _log.info('# ret> %d', proc.returncode)
            if out:
                _log.info(out)
            if err:
                _log.info(err)
            host.exit(proc.returncode)
        return proc.returncode, out
Exemple #23
0
class DepsUpdater(object):
    """Imports or updates the W3C test repositories under LayoutTests/imported.

    Clones the upstream repo, re-runs import-w3c-tests on it, prunes manual
    tests and orphaned baselines, and commits the result. All subprocess and
    filesystem operations go through small helpers (run, copyfile, remove,
    rmtree) so that --verbose can echo each action.
    """

    def __init__(self, host):
        self.host = host
        self.executive = host.executive
        self.fs = host.filesystem
        self.finder = WebKitFinder(self.fs)
        # The fields below are placeholders; parse_args() fills them in.
        self.verbose = False
        self.allow_local_commits = False
        self.keep_w3c_repos_around = False
        self.target = None

    def main(self, argv=None):
        """Runs the whole update; returns 0 on success, 1 if the checkout is unusable."""
        self.parse_args(argv)

        if not self.checkout_is_okay():
            return 1

        self.print_('## Noting the current Chromium commit.')
        _, show_ref_output = self.run(['git', 'show-ref', 'HEAD'])
        # show-ref prints "<sha> <refname>"; keep only the sha.
        chromium_commitish = show_ref_output.split()[0]

        if self.target == 'wpt':
            import_commitish = self.update(
                WPT_DEST_NAME,
                'https://chromium.googlesource.com/external/w3c/web-platform-tests.git')

            # Overwrite selected upstream helper scripts with the local
            # LayoutTests/resources versions and stage them.
            for resource in ['testharnessreport.js', 'WebIDLParser.js']:
                source = self.path_from_webkit_base('LayoutTests', 'resources', resource)
                destination = self.path_from_webkit_base('LayoutTests', 'imported', WPT_DEST_NAME, 'resources', resource)
                self.copyfile(source, destination)
                self.run(['git', 'add', destination])
            for resource in ['vendor-prefix.js']:
                source = self.path_from_webkit_base('LayoutTests', 'resources', resource)
                destination = self.path_from_webkit_base('LayoutTests', 'imported', WPT_DEST_NAME, 'common', resource)
                self.copyfile(source, destination)
                self.run(['git', 'add', destination])

        elif self.target == 'css':
            import_commitish = self.update(
                CSS_DEST_NAME,
                'https://chromium.googlesource.com/external/w3c/csswg-test.git')
        else:
            # parse_args restricts target to {'css', 'wpt'}, so this only
            # fires if main() is driven with an unvalidated argv.
            raise AssertionError("Unsupported target %s" % self.target)

        self.commit_changes_if_needed(chromium_commitish, import_commitish)

        return 0

    def parse_args(self, argv):
        """Parses argv and stores the options on self."""
        parser = argparse.ArgumentParser()
        parser.description = __doc__
        parser.add_argument('-v', '--verbose', action='store_true',
                            help='log what we are doing')
        parser.add_argument('--allow-local-commits', action='store_true',
                            help='allow script to run even if we have local commits')
        parser.add_argument('--keep-w3c-repos-around', action='store_true',
                            help='leave the w3c repos around that were imported previously.')
        parser.add_argument('target', choices=['css', 'wpt'],
                            help='Target repository.  "css" for csswg-test, "wpt" for web-platform-tests.')

        args = parser.parse_args(argv)
        self.allow_local_commits = args.allow_local_commits
        self.keep_w3c_repos_around = args.keep_w3c_repos_around
        self.verbose = args.verbose
        self.target = args.target

    def checkout_is_okay(self):
        """Returns True when the git checkout is in a state safe to import into."""
        # `git diff --quiet` exits non-zero when there are uncommitted changes.
        git_diff_retcode, _ = self.run(['git', 'diff', '--quiet', 'HEAD'], exit_on_failure=False)
        if git_diff_retcode:
            self.print_('## Checkout is dirty; aborting.')
            return False

        local_commits = self.run(['git', 'log', '--oneline', 'origin/master..HEAD'])[1]
        if local_commits and not self.allow_local_commits:
            self.print_('## Checkout has local commits; aborting. Use --allow-local-commits to allow this.')
            return False

        # A leftover temp clone from a previous run would collide with ours.
        if self.fs.exists(self.path_from_webkit_base(WPT_DEST_NAME)):
            self.print_('## WebKit/%s exists; aborting.' % WPT_DEST_NAME)
            return False

        if self.fs.exists(self.path_from_webkit_base(CSS_DEST_NAME)):
            self.print_('## WebKit/%s repo exists; aborting.' % CSS_DEST_NAME)
            return False

        return True

    def update(self, dest_dir_name, url):
        """Updates an imported repository.

        Args:
            dest_dir_name: The destination directory name.
            url: URL of the git repository.

        Returns:
            A string for the commit description "<destination>@<commitish>".
        """
        temp_repo_path = self.path_from_webkit_base(dest_dir_name)
        self.print_('## Cloning %s into %s.' % (url, temp_repo_path))
        self.run(['git', 'clone', url, temp_repo_path])

        self.run(['git', 'submodule', 'update', '--init', '--recursive'], cwd=temp_repo_path)

        self.print_('## Noting the revision we are importing.')
        _, show_ref_output = self.run(['git', 'show-ref', 'origin/master'], cwd=temp_repo_path)
        master_commitish = show_ref_output.split()[0]

        # Delete everything except baselines so stale tests don't survive
        # the re-import; baselines are reconciled further below.
        self.print_('## Cleaning out tests from LayoutTests/imported/%s.' % dest_dir_name)
        dest_path = self.path_from_webkit_base('LayoutTests', 'imported', dest_dir_name)
        files_to_delete = self.fs.files_under(dest_path, file_filter=self.is_not_baseline)
        for subpath in files_to_delete:
            self.remove('LayoutTests', 'imported', subpath)

        self.print_('## Importing the tests.')
        src_repo = self.path_from_webkit_base(dest_dir_name)
        import_path = self.path_from_webkit_base('Tools', 'Scripts', 'import-w3c-tests')
        self.run([self.host.executable, import_path, '-d', 'imported', src_repo])

        self.run(['git', 'add', '--all', 'LayoutTests/imported/%s' % dest_dir_name])

        self.print_('## Deleting manual tests.')
        files_to_delete = self.fs.files_under(dest_path, file_filter=self.is_manual_test)
        for subpath in files_to_delete:
            self.remove('LayoutTests', 'imported', subpath)

        self.print_('## Deleting any orphaned baselines.')
        previous_baselines = self.fs.files_under(dest_path, file_filter=self.is_baseline)
        for subpath in previous_baselines:
            full_path = self.fs.join(dest_path, subpath)
            # If the glob finds only the baseline itself, the test it
            # belonged to is gone, so the baseline is orphaned.
            if self.fs.glob(full_path.replace('-expected.txt', '*')) == [full_path]:
                self.fs.remove(full_path)

        if not self.keep_w3c_repos_around:
            self.print_('## Deleting temp repo directory %s.' % temp_repo_path)
            self.rmtree(temp_repo_path)

        return '%s@%s' % (dest_dir_name, master_commitish)

    def commit_changes_if_needed(self, chromium_commitish, import_commitish):
        """Commits staged/working-tree changes, if any, with a generated message."""
        # Non-zero return from `git diff --quiet` means there ARE changes.
        if self.run(['git', 'diff', '--quiet', 'HEAD'], exit_on_failure=False)[0]:
            self.print_('## Committing changes.')
            commit_msg = ('Import %s\n'
                          '\n'
                          'Using update-w3c-deps in Chromium %s.\n'
                          % (import_commitish, chromium_commitish))
            path_to_commit_msg = self.path_from_webkit_base('commit_msg')
            if self.verbose:
                self.print_('cat > %s <<EOF' % path_to_commit_msg)
                self.print_(commit_msg)
                self.print_('EOF')
            self.fs.write_text_file(path_to_commit_msg, commit_msg)
            self.run(['git', 'commit', '-a', '-F', path_to_commit_msg])
            self.remove(path_to_commit_msg)
            self.print_('## Done: changes imported and committed.')
        else:
            self.print_('## Done: no changes to import.')

    def is_manual_test(self, fs, dirname, basename):  # Callback for FileSystem.files_under; not all arguments used - pylint: disable=unused-argument
        # We are importing manual pointer event tests and we are automating them.
        return ("pointerevents" not in dirname) and (basename.endswith('-manual.html') or basename.endswith('-manual.htm'))

    def is_baseline(self, fs, dirname, basename):  # Callback for FileSystem.files_under; not all arguments used - pylint: disable=unused-argument
        return basename.endswith('-expected.txt')

    def is_not_baseline(self, fs, dirname, basename):
        # Complement of is_baseline, for use as a files_under filter.
        return not self.is_baseline(fs, dirname, basename)

    def run(self, cmd, exit_on_failure=True, cwd=None):
        """Runs cmd, returning (returncode, stdout).

        Echoes the command and its output when verbose; by default a failing
        command terminates the process via host.exit().
        """
        if self.verbose:
            self.print_(' '.join(cmd))

        cwd = cwd or self.finder.webkit_base()
        proc = self.executive.popen(cmd, stdout=self.executive.PIPE, stderr=self.executive.PIPE, cwd=cwd)
        out, err = proc.communicate()
        if proc.returncode or self.verbose:
            self.print_('# ret> %d' % proc.returncode)
            if out:
                for line in out.splitlines():
                    self.print_('# out> %s' % line)
            if err:
                for line in err.splitlines():
                    self.print_('# err> %s' % line)
        if exit_on_failure and proc.returncode:
            self.host.exit(proc.returncode)
        return proc.returncode, out

    def copyfile(self, source, destination):
        # cp with optional echo.
        if self.verbose:
            self.print_('cp %s %s' % (source, destination))
        self.fs.copyfile(source, destination)

    def remove(self, *comps):
        # rm (single file) with optional echo; comps are joined under the WebKit base.
        dest = self.path_from_webkit_base(*comps)
        if self.verbose:
            self.print_('rm %s' % dest)
        self.fs.remove(dest)

    def rmtree(self, *comps):
        # Recursive rm with optional echo; comps are joined under the WebKit base.
        dest = self.path_from_webkit_base(*comps)
        if self.verbose:
            self.print_('rm -fr %s' % dest)
        self.fs.rmtree(dest)

    def path_from_webkit_base(self, *comps):
        return self.finder.path_from_webkit_base(*comps)

    def print_(self, msg):
        self.host.print_(msg)
# Example #24
 def test_webkit_base(self):
     """Checks that WebKitFinder reports the mock checkout's WebKit root."""
     # NOTE(review): an identical test_webkit_base definition follows this
     # one; if both live in the same class, the later definition shadows
     # this one — confirm which copy is intended.
     finder = WebKitFinder(MockFileSystem())
     self.assertEqual(finder.webkit_base(),
                      '/mock-checkout/third_party/WebKit')
 def test_webkit_base(self):
     """Checks that WebKitFinder reports the mock checkout's WebKit root."""
     # NOTE(review): duplicates the preceding test_webkit_base except for
     # line wrapping; in the same class this definition wins — confirm the
     # duplicate is intentional.
     finder = WebKitFinder(MockFileSystem())
     self.assertEqual(finder.webkit_base(), '/mock-checkout/third_party/WebKit')
class WPTExpectationsUpdater(object):
    """Adds TestExpectations lines (and downloads baselines) for failing try jobs.

    Collects unexpected layout-test results from the current CL's try jobs,
    merges identical results across platforms, rebaselines testharness.js
    failures, and writes the remaining failures as new expectation lines
    after the MARKER_COMMENT in the generic TestExpectations file.

    NOTE: uses dict.iteritems() in several places, so this code targets
    Python 2.
    """

    def __init__(self, host):
        self.host = host
        self.finder = WebKitFinder(self.host.filesystem)

    def run(self, args=None):
        """Downloads text new baselines and adds test expectations lines."""
        parser = argparse.ArgumentParser(description=__doc__)
        parser.add_argument('-v',
                            '--verbose',
                            action='store_true',
                            help='More verbose logging.')
        args = parser.parse_args(args)

        log_level = logging.DEBUG if args.verbose else logging.INFO
        logging.basicConfig(level=log_level, format='%(message)s')

        # GitCL returns the string 'None' (not the None object) when the
        # branch has no associated issue.
        issue_number = self.get_issue_number()
        if issue_number == 'None':
            _log.error('No issue on current branch.')
            return 1

        builds = self.get_latest_try_jobs()
        _log.debug('Latest try jobs: %r', builds)
        if not builds:
            _log.error('No try job information was collected.')
            return 1

        # Here we build up a dict of failing test results for all platforms.
        test_expectations = {}
        for build in builds:
            port_results = self.get_failing_results_dict(build)
            test_expectations = self.merge_dicts(test_expectations,
                                                 port_results)

        # And then we merge results for different platforms that had the same results.
        for test_name, platform_result in test_expectations.iteritems():
            # platform_result is a dict mapping platforms to results.
            test_expectations[test_name] = self.merge_same_valued_keys(
                platform_result)

        test_expectations = self.download_text_baselines(test_expectations)
        test_expectation_lines = self.create_line_list(test_expectations)
        self.write_to_test_expectations(test_expectation_lines)
        return 0

    def get_issue_number(self):
        """Returns current CL number. Can be replaced in unit tests."""
        return GitCL(self.host).get_issue_number()

    def get_latest_try_jobs(self):
        """Returns the latest finished try jobs as Build objects."""
        return GitCL(self.host).latest_try_jobs(self._get_try_bots())

    def get_failing_results_dict(self, build):
        """Returns a nested dict of failing test results.

        Retrieves a full list of layout test results from a builder result URL.
        Collects the builder name, platform and a list of tests that did not
        run as expected.

        Args:
            build: A Build object.

        Returns:
            A dictionary with the structure: {
                'full-port-name': {
                    'expected': 'TIMEOUT',
                    'actual': 'CRASH',
                    'bug': 'crbug.com/11111'
                }
            }
            If there are no failing results or no results could be fetched,
            this will return an empty dictionary.
        """
        layout_test_results = self.host.buildbot.fetch_results(build)
        if layout_test_results is None:
            _log.warning('No results for build %s', build)
            return {}
        port_name = self.host.builders.port_name_for_builder_name(
            build.builder_name)
        test_results = layout_test_results.didnt_run_as_expected_results()
        failing_results_dict = self.generate_results_dict(
            port_name, test_results)
        return failing_results_dict

    def generate_results_dict(self, full_port_name, test_results):
        """Makes a dict with results for one platform.

        Args:
            full_port_name: The fully-qualified port name, e.g. "win-win10".
            test_results: A list of LayoutTestResult objects.

        Returns:
            A dict mapping the full port name to a dict with the results for
            the given test and platform.
        """
        test_dict = {}
        for result in test_results:
            test_name = result.test_name()
            # All new expectation lines share one umbrella tracking bug.
            test_dict[test_name] = {
                full_port_name: {
                    'expected': result.expected_results(),
                    'actual': result.actual_results(),
                    'bug': 'crbug.com/626703'
                }
            }
        return test_dict

    def merge_dicts(self, target, source, path=None):
        """Recursively merges nested dictionaries.

        Args:
            target: First dictionary, which is updated based on source.
            source: Second dictionary, not modified.

        Returns:
            An updated target dictionary.

        Raises:
            ValueError: If a key exists in both dicts with differing,
                non-dict values (i.e. the merge would lose data).
        """
        path = path or []
        for key in source:
            if key in target:
                if (isinstance(target[key], dict)) and isinstance(
                        source[key], dict):
                    self.merge_dicts(target[key], source[key],
                                     path + [str(key)])
                elif target[key] == source[key]:
                    pass
                else:
                    raise ValueError(
                        'The key: %s already exist in the target dictionary.' %
                        '.'.join(path))
            else:
                target[key] = source[key]
        return target

    def merge_same_valued_keys(self, dictionary):
        """Merges keys in dictionary with same value.

        Traverses through a dict and compares the values of keys to one another.
        If the values match, the keys are combined to a tuple and the previous
        keys are removed from the dict.

        Args:
            dictionary: A dictionary with a dictionary as the value.

        Returns:
            A new dictionary with updated keys to reflect matching values of keys.
            Example: {
                'one': {'foo': 'bar'},
                'two': {'foo': 'bar'},
                'three': {'foo': 'bar'}
            }
            is converted to a new dictionary with that contains
            {('one', 'two', 'three'): {'foo': 'bar'}}
        """
        merged_dict = {}
        matching_value_keys = set()
        keys = sorted(dictionary.keys())
        # Repeatedly take the first remaining key and scan the rest for
        # equal values; matched keys are grouped into one tuple key.
        while keys:
            current_key = keys[0]
            found_match = False
            if current_key == keys[-1]:
                merged_dict[current_key] = dictionary[current_key]
                keys.remove(current_key)
                break

            for next_item in keys[1:]:
                if dictionary[current_key] == dictionary[next_item]:
                    found_match = True
                    matching_value_keys.update([current_key, next_item])

                # Finalize the group only once the scan reaches the last key.
                if next_item == keys[-1]:
                    if found_match:
                        merged_dict[tuple(
                            matching_value_keys)] = dictionary[current_key]
                        keys = [
                            k for k in keys if k not in matching_value_keys
                        ]
                    else:
                        merged_dict[current_key] = dictionary[current_key]
                        keys.remove(current_key)
            matching_value_keys = set()
        return merged_dict

    def get_expectations(self, results):
        """Returns a set of test expectations for a given test dict.

        Returns a set of one or more test expectations based on the expected
        and actual results of a given test name.

        Args:
            results: A dictionary that maps one test to its results. Example:
                {
                    'test_name': {
                        'expected': 'PASS',
                        'actual': 'FAIL',
                        'bug': 'crbug.com/11111'
                    }
                }

        Returns:
            A set of one or more test expectation strings with the first letter
            capitalized. Example: set(['Failure', 'Timeout']).
        """
        expectations = set()
        failure_types = [
            'TEXT', 'FAIL', 'IMAGE+TEXT', 'IMAGE', 'AUDIO', 'MISSING', 'LEAK'
        ]
        test_expectation_types = [
            'SLOW', 'TIMEOUT', 'CRASH', 'PASS', 'REBASELINE',
            'NEEDSREBASELINE', 'NEEDSMANUALREBASELINE'
        ]
        # 'expected'/'actual' are space-separated lists; any failure-type
        # actual maps to the generic 'Failure' expectation.
        for expected in results['expected'].split():
            for actual in results['actual'].split():
                if expected in test_expectation_types and actual in failure_types:
                    expectations.add('Failure')
                if expected in failure_types and actual in test_expectation_types:
                    expectations.add(actual.capitalize())
                if expected in test_expectation_types and actual in test_expectation_types:
                    expectations.add(actual.capitalize())
        return expectations

    def create_line_list(self, merged_results):
        """Creates list of test expectations lines.

        Traverses through the given |merged_results| dictionary and parses the
        value to create one test expectations line per key.

        Only tests under 'external' get lines; other tests are skipped.

        Args:
            merged_results: A merged_results with the format:
                {
                    'test_name': {
                        'platform': {
                            'expected: 'PASS',
                            'actual': 'FAIL',
                            'bug': 'crbug.com/11111'
                        }
                    }
                }

        Returns:
            A list of test expectations lines with the format:
            ['BUG_URL [PLATFORM(S)] TEST_NAME [EXPECTATION(S)]']
        """
        line_list = []
        for test_name, port_results in sorted(merged_results.iteritems()):
            for port_names in sorted(port_results):
                if test_name.startswith('external'):
                    line_parts = [port_results[port_names]['bug']]
                    specifier_part = self.specifier_part(
                        self.to_list(port_names), test_name)
                    if specifier_part:
                        line_parts.append(specifier_part)
                    line_parts.append(test_name)
                    line_parts.append('[ %s ]' % ' '.join(
                        self.get_expectations(port_results[port_names])))
                    line_list.append(' '.join(line_parts))
        return line_list

    def specifier_part(self, port_names, test_name):
        """Returns the specifier part for a new test expectations line.

        Args:
            port_names: A list of full port names that the line should apply to.
            test_name: The test name for the expectation line.

        Returns:
            The specifier part of the new expectation line, e.g. "[ Mac ]".
            This will be an empty string if the line should apply to all platforms.
        """
        specifiers = []
        for name in sorted(port_names):
            specifiers.append(
                self.host.builders.version_specifier_for_port_name(name))
        port = self.host.port_factory.get()
        specifiers.extend(self.skipped_specifiers(test_name))
        specifiers = self.simplify_specifiers(
            specifiers, port.configuration_specifier_macros())
        if not specifiers:
            return ''
        return '[ %s ]' % ' '.join(specifiers)

    @staticmethod
    def to_list(tuple_or_value):
        """Converts a tuple to a list, and a string value to a one-item list."""
        if isinstance(tuple_or_value, tuple):
            return list(tuple_or_value)
        return [tuple_or_value]

    def skipped_specifiers(self, test_name):
        """Returns a list of platform specifiers for which the test is skipped."""
        # TODO(qyearsley): Change Port.skips_test so that this can be simplified.
        specifiers = []
        for port in self.all_try_builder_ports():
            generic_expectations = TestExpectations(port,
                                                    tests=[test_name],
                                                    include_overrides=False)
            full_expectations = TestExpectations(port,
                                                 tests=[test_name],
                                                 include_overrides=True)
            if port.skips_test(test_name, generic_expectations,
                               full_expectations):
                specifiers.append(
                    self.host.builders.version_specifier_for_port_name(
                        port.name()))
        return specifiers

    @memoized
    def all_try_builder_ports(self):
        """Returns a list of Port objects for all try builders."""
        return [
            self.host.port_factory.get_from_builder_name(name)
            for name in self._get_try_bots()
        ]

    @staticmethod
    def simplify_specifiers(specifiers, configuration_specifier_macros):  # pylint: disable=unused-argument
        """Converts some collection of specifiers to an equivalent and maybe shorter list.

        The input strings are all case-insensitive, but the strings in the
        return value will all be capitalized.

        Args:
            specifiers: A collection of lower-case specifiers.
            configuration_specifier_macros: A dict mapping "macros" for
                groups of specifiers to lists of specific specifiers. In
                practice, this is a dict mapping operating systems to
                supported versions, e.g. {"win": ["win7", "win10"]}.

        Returns:
            A shortened list of specifiers. For example, ["win7", "win10"]
            would be converted to ["Win"]. If the given list covers all
            supported platforms, then an empty list is returned.
            This list will be sorted and have capitalized specifier strings.
        """
        specifiers = {specifier.lower() for specifier in specifiers}
        # Collapse any complete set of versions into its OS macro.
        for macro_specifier, version_specifiers in configuration_specifier_macros.iteritems(
        ):
            macro_specifier = macro_specifier.lower()
            version_specifiers = {
                specifier.lower()
                for specifier in version_specifiers
            }
            if version_specifiers.issubset(specifiers):
                specifiers -= version_specifiers
                specifiers.add(macro_specifier)
        # All macros present means the line applies everywhere: no specifiers.
        if specifiers == {
                macro.lower()
                for macro in configuration_specifier_macros.keys()
        }:
            return []
        return sorted(specifier.capitalize() for specifier in specifiers)

    def write_to_test_expectations(self, line_list):
        """Writes to TestExpectations.

        The place in the file where the new lines are inserted is after a
        marker comment line. If this marker comment line is not found, it will
        be added to the end of the file.

        Lines whose test name already appears anywhere in the file are
        skipped to avoid duplicates.

        Args:
            line_list: A list of lines to add to the TestExpectations file.
        """
        _log.info('Lines to write to TestExpectations:')
        for line in line_list:
            _log.info('  %s', line)
        port = self.host.port_factory.get()
        expectations_file_path = port.path_to_generic_test_expectations_file()
        file_contents = self.host.filesystem.read_text_file(
            expectations_file_path)
        marker_comment_index = file_contents.find(MARKER_COMMENT)
        line_list = [
            line for line in line_list if
            self._test_name_from_expectation_string(line) not in file_contents
        ]
        if not line_list:
            return
        if marker_comment_index == -1:
            file_contents += '\n%s\n' % MARKER_COMMENT
            file_contents += '\n'.join(line_list)
        else:
            # Splice the new lines in immediately after the marker line's
            # newline; the tail slice starts at that same newline, which
            # terminates the inserted block.
            end_of_marker_line = (file_contents[marker_comment_index:].find(
                '\n')) + marker_comment_index
            file_contents = file_contents[:end_of_marker_line + 1] + '\n'.join(
                line_list) + file_contents[end_of_marker_line:]
        self.host.filesystem.write_text_file(expectations_file_path,
                                             file_contents)

    @staticmethod
    def _test_name_from_expectation_string(expectation_string):
        """Extracts the test name from a single expectation line."""
        return TestExpectationLine.tokenize_line(
            filename='', expectation_string=expectation_string,
            line_number=0).name

    def download_text_baselines(self, tests_results):
        """Fetches new baseline files for tests that should be rebaselined.

        Invokes `webkit-patch rebaseline-cl` in order to download new baselines
        (-expected.txt files) for testharness.js tests that did not crash or
        time out. Then, the platform-specific test is removed from the overall
        failure test dictionary.

        Args:
            tests_results: A dict mapping test name to platform to test results.

        Returns:
            An updated tests_results dictionary without the platform-specific
            testharness.js tests that required new baselines to be downloaded
            from `webkit-patch rebaseline-cl`.
        """
        tests_to_rebaseline, tests_results = self.get_tests_to_rebaseline(
            tests_results)
        _log.info('Tests to rebaseline:')
        for test in tests_to_rebaseline:
            _log.info('  %s', test)
        if tests_to_rebaseline:
            webkit_patch = self.host.filesystem.join(
                self.finder.chromium_base(), self.finder.webkit_base(),
                self.finder.path_to_script('webkit-patch'))
            self.host.executive.run_command([
                'python',
                webkit_patch,
                'rebaseline-cl',
                '--verbose',
                '--no-trigger-jobs',
            ] + tests_to_rebaseline)
        return tests_results

    def get_tests_to_rebaseline(self, test_results):
        """Returns a list of tests to download new baselines for.

        Creates a list of tests to rebaseline depending on the tests' platform-
        specific results. In general, this will be non-ref tests that failed
        due to a baseline mismatch (rather than crash or timeout).

        Args:
            test_results: A dictionary of failing test results, mapping tests
                to platforms to result dicts.

        Returns:
            A pair: A set of tests to be rebaselined, and a modified copy of
            the test results dictionary. The tests to be rebaselined should
            include testharness.js tests that failed due to a baseline mismatch.
        """
        # Deep-copy so the caller's dictionary is never mutated.
        test_results = copy.deepcopy(test_results)
        tests_to_rebaseline = set()
        for test_path in test_results:
            if not (self.is_js_test(test_path)
                    and test_results.get(test_path)):
                continue
            # .keys() snapshot allows deleting entries while iterating.
            for platform in test_results[test_path].keys():
                if test_results[test_path][platform]['actual'] not in [
                        'CRASH', 'TIMEOUT'
                ]:
                    del test_results[test_path][platform]
                    tests_to_rebaseline.add(test_path)
        return sorted(tests_to_rebaseline), test_results

    def is_js_test(self, test_path):
        """Checks whether a given file is a testharness.js test.

        Args:
            test_path: A file path relative to the layout tests directory.
                This might correspond to a deleted file or a non-test.
        """
        absolute_path = self.host.filesystem.join(
            self.finder.layout_tests_dir(), test_path)
        test_parser = TestParser(absolute_path, self.host)
        if not test_parser.test_doc:
            return False
        return test_parser.is_jstest()

    def _get_try_bots(self):
        """Returns the names of all try builders to collect results from."""
        return self.host.builders.all_try_builder_names()
# Example #27
class TestImporter(object):
    """Imports tests from a local W3C test repository clone into LayoutTests.

    The importer walks the source repository, classifies each file as a test,
    reference, or support file, converts files where necessary, and copies
    them under LayoutTests/<destination>/<repo-name>.
    """

    def __init__(self, host, source_repo_path, options):
        """Initializes the importer.

        Args:
            host: A Host object providing filesystem and executive services.
            source_repo_path: Absolute path to the local test repository clone.
            options: Parsed command-line options (destination, overwrite,
                dry_run, all, ignore_expectations, ...).
        """
        self.host = host
        self.source_repo_path = source_repo_path
        self.options = options

        self.filesystem = self.host.filesystem
        self.webkit_finder = WebKitFinder(self.filesystem)
        self._webkit_root = self.webkit_finder.webkit_base()
        self.layout_tests_dir = self.webkit_finder.path_from_webkit_base('LayoutTests')
        self.destination_directory = self.filesystem.normpath(
            self.filesystem.join(
                self.layout_tests_dir,
                options.destination,
                self.filesystem.basename(self.source_repo_path)))
        # When importing in place, files matching skip expectations are pruned
        # from the source tree itself rather than merely skipped during copy.
        self.import_in_place = (self.source_repo_path == self.destination_directory)
        self.dir_above_repo = self.filesystem.dirname(self.source_repo_path)

        self.import_list = []

        # This is just a FYI list of CSS properties that still need to be prefixed,
        # which may be output after importing.
        self._prefixed_properties = {}

    def do_import(self):
        """Runs the full import: scans the source repo, then copies the tests."""
        _log.info("Importing %s into %s", self.source_repo_path, self.destination_directory)
        self.find_importable_tests()
        self.import_tests()

    def find_importable_tests(self):
        """Walks through the source directory to find what tests should be imported.

        This function sets self.import_list, which contains information about how many
        tests are being imported, and their source and destination paths.
        """
        paths_to_skip = self.find_paths_to_skip()

        for root, dirs, files in self.filesystem.walk(self.source_repo_path):
            cur_dir = root.replace(self.dir_above_repo + '/', '') + '/'
            _log.info('  scanning ' + cur_dir + '...')
            total_tests = 0
            reftests = 0
            jstests = 0

            # Files in 'tools' are not for browser testing, so we skip them.
            # See: http://testthewebforward.org/docs/test-format-guidelines.html#tools
            DIRS_TO_SKIP = ('.git', 'test-plan', 'tools')

            # We copy all files in 'support', including HTML without metadata.
            # See: http://testthewebforward.org/docs/test-format-guidelines.html#support-files
            DIRS_TO_INCLUDE = ('resources', 'support')

            if dirs:
                # Mutating `dirs` in place prunes subtrees from the walk.
                for d in DIRS_TO_SKIP:
                    if d in dirs:
                        dirs.remove(d)

                for path in paths_to_skip:
                    path_base = path.replace(self.options.destination + '/', '')
                    path_base = path_base.replace(cur_dir, '')
                    path_full = self.filesystem.join(root, path_base)
                    if path_base in dirs:
                        dirs.remove(path_base)
                        if not self.options.dry_run and self.import_in_place:
                            _log.info("  pruning %s", path_base)
                            self.filesystem.rmtree(path_full)
                        else:
                            _log.info("  skipping %s", path_base)

            copy_list = []

            for filename in files:
                path_full = self.filesystem.join(root, filename)
                path_base = path_full.replace(self.source_repo_path + '/', '')
                path_base = self.destination_directory.replace(self.layout_tests_dir + '/', '') + '/' + path_base
                if path_base in paths_to_skip:
                    if not self.options.dry_run and self.import_in_place:
                        _log.info("  pruning %s", path_base)
                        self.filesystem.remove(path_full)
                        continue
                    else:
                        continue
                # FIXME: This block should really be a separate function, but the early-continues make that difficult.

                if filename.startswith('.') or filename.endswith('.pl'):
                    # The w3cs repos may contain perl scripts, which we don't care about.
                    continue
                if filename == 'OWNERS' or filename == 'reftest.list':
                    # These files fail our presubmits.
                    # See http://crbug.com/584660 and http://crbug.com/582838.
                    continue

                fullpath = self.filesystem.join(root, filename)

                # Non-document files (not HTML/XHTML/XML) are plain copies.
                mimetype = mimetypes.guess_type(fullpath)
                if ('html' not in str(mimetype[0]) and
                        'application/xhtml+xml' not in str(mimetype[0]) and
                        'application/xml' not in str(mimetype[0])):
                    copy_list.append({'src': fullpath, 'dest': filename})
                    continue

                if self.filesystem.basename(root) in DIRS_TO_INCLUDE:
                    copy_list.append({'src': fullpath, 'dest': filename})
                    continue

                test_parser = TestParser(fullpath, self.host)
                test_info = test_parser.analyze_test()
                if test_info is None:
                    # Not recognized as a test; copy it as a support file.
                    copy_list.append({'src': fullpath, 'dest': filename})
                    continue

                if self.path_too_long(path_full):
                    _log.warning('%s skipped due to long path. '
                                 'Max length from repo base %d chars; see http://crbug.com/609871.',
                                 path_full, MAX_PATH_LENGTH)
                    continue

                if 'reference' in test_info.keys():
                    test_basename = self.filesystem.basename(test_info['test'])
                    # Add the ref file, following WebKit style.
                    # FIXME: Ideally we'd support reading the metadata
                    # directly rather than relying on a naming convention.
                    # Using a naming convention creates duplicate copies of the
                    # reference files (http://crrev.com/268729).
                    ref_file = self.filesystem.splitext(test_basename)[0] + '-expected'
                    # Make sure to use the extension from the *reference*, not
                    # from the test, because at least flexbox tests use XHTML
                    # references but HTML tests.
                    ref_file += self.filesystem.splitext(test_info['reference'])[1]

                    if not self.filesystem.exists(test_info['reference']):
                        _log.warning('%s skipped because ref file %s was not found.',
                                     path_full, ref_file)
                        continue

                    if self.path_too_long(path_full.replace(filename, ref_file)):
                        _log.warning('%s skipped because path of ref file %s would be too long. '
                                     'Max length from repo base %d chars; see http://crbug.com/609871.',
                                     path_full, ref_file, MAX_PATH_LENGTH)
                        continue

                    reftests += 1
                    total_tests += 1
                    copy_list.append({'src': test_info['reference'], 'dest': ref_file,
                                      'reference_support_info': test_info['reference_support_info']})
                    copy_list.append({'src': test_info['test'], 'dest': filename})

                elif 'jstest' in test_info.keys():
                    jstests += 1
                    total_tests += 1
                    copy_list.append({'src': fullpath, 'dest': filename, 'is_jstest': True})

                elif self.options.all:
                    # Pixel/manual tests are only imported when --all is given.
                    total_tests += 1
                    copy_list.append({'src': fullpath, 'dest': filename})

            if copy_list:
                # Only add this directory to the list if there's something to import
                self.import_list.append({'dirname': root, 'copy_list': copy_list,
                                         'reftests': reftests, 'jstests': jstests, 'total_tests': total_tests})

    def find_paths_to_skip(self):
        """Returns the set of paths marked SKIP in W3CImportExpectations.

        Returns an empty set when --ignore-expectations was given. Lines with
        platform specifiers are ignored with a warning, since import
        expectations are platform-independent.
        """
        if self.options.ignore_expectations:
            return set()

        paths_to_skip = set()
        port = self.host.port_factory.get()
        w3c_import_expectations_path = self.webkit_finder.path_from_webkit_base('LayoutTests', 'W3CImportExpectations')
        w3c_import_expectations = self.filesystem.read_text_file(w3c_import_expectations_path)
        parser = TestExpectationParser(port, all_tests=(), is_lint_mode=False)
        expectation_lines = parser.parse(w3c_import_expectations_path, w3c_import_expectations)
        for line in expectation_lines:
            if 'SKIP' in line.expectations:
                if line.specifiers:
                    _log.warning("W3CImportExpectations:%s should not have any specifiers", line.line_numbers)
                    continue
                paths_to_skip.add(line.name)
        return paths_to_skip

    def import_tests(self):
        """Reads |self.import_list|, and converts and copies files to their destination."""
        total_imported_tests = 0
        total_imported_reftests = 0
        total_imported_jstests = 0

        for dir_to_copy in self.import_list:
            total_imported_tests += dir_to_copy['total_tests']
            total_imported_reftests += dir_to_copy['reftests']
            total_imported_jstests += dir_to_copy['jstests']

            if not dir_to_copy['copy_list']:
                continue

            orig_path = dir_to_copy['dirname']

            relative_dir = self.filesystem.relpath(orig_path, self.source_repo_path)
            dest_dir = self.filesystem.join(self.destination_directory, relative_dir)

            if not self.filesystem.exists(dest_dir):
                self.filesystem.maybe_make_directory(dest_dir)

            for file_to_copy in dir_to_copy['copy_list']:
                # copy_file returns the destination path (or None on error),
                # but nothing downstream consumes it, so the result is ignored.
                self.copy_file(file_to_copy, dest_dir)

        _log.info('')
        _log.info('Import complete')
        _log.info('')
        _log.info('IMPORTED %d TOTAL TESTS', total_imported_tests)
        _log.info('Imported %d reftests', total_imported_reftests)
        _log.info('Imported %d JS tests', total_imported_jstests)
        _log.info('Imported %d pixel/manual tests', total_imported_tests - total_imported_jstests - total_imported_reftests)
        _log.info('')

        if self._prefixed_properties:
            _log.info('Properties needing prefixes (by count):')
            for prefixed_property in sorted(self._prefixed_properties, key=lambda p: self._prefixed_properties[p]):
                _log.info('  %s: %s', prefixed_property, self._prefixed_properties[prefixed_property])

    def copy_file(self, file_to_copy, dest_dir):
        """Converts and copies a file, if it should be copied.

        Args:
            file_to_copy: A dict in a file copy list constructed by
                find_importable_tests, which represents one file to copy, including
                the keys:
                    "src": Absolute path to the source location of the file.
                    "dest": File name of the destination file.
                And possibly also the keys "reference_support_info" or "is_jstest".
            dest_dir: Path to the directory where the file should be copied.

        Returns:
            The path to the new file, relative to the Blink root (//third_party/WebKit),
            or None if the source was missing or a directory.
        """
        source_path = self.filesystem.normpath(file_to_copy['src'])
        dest_path = self.filesystem.join(dest_dir, file_to_copy['dest'])

        if self.filesystem.isdir(source_path):
            _log.error('%s refers to a directory', source_path)
            return None

        if not self.filesystem.exists(source_path):
            _log.error('%s not found. Possible error in the test.', source_path)
            return None

        if file_to_copy.get('reference_support_info'):
            reference_support_info = file_to_copy['reference_support_info']
        else:
            reference_support_info = None

        if not self.filesystem.exists(self.filesystem.dirname(dest_path)):
            if not self.import_in_place and not self.options.dry_run:
                self.filesystem.maybe_make_directory(self.filesystem.dirname(dest_path))

        relpath = self.filesystem.relpath(dest_path, self.layout_tests_dir)
        if not self.options.overwrite and self.filesystem.exists(dest_path):
            _log.info('  skipping %s', relpath)
        else:
            # FIXME: Maybe doing a file diff is in order here for existing files?
            # In other words, there's no sense in overwriting identical files, but
            # there's no harm in copying the identical thing.
            _log.info('  %s', relpath)

        if self.should_try_to_convert(file_to_copy, source_path, dest_dir):
            converted_file = convert_for_webkit(
                dest_dir, filename=source_path,
                reference_support_info=reference_support_info,
                host=self.host)
            for prefixed_property in converted_file[0]:
                self._prefixed_properties.setdefault(prefixed_property, 0)
                self._prefixed_properties[prefixed_property] += 1

            if not self.options.dry_run:
                self.filesystem.write_text_file(dest_path, converted_file[1])
        else:
            if not self.import_in_place and not self.options.dry_run:
                self.filesystem.copyfile(source_path, dest_path)
                # read_binary_file returns bytes on Python 3, so the shebang
                # must be compared against a bytes literal (comparing to the
                # str '#!' was always False there; b'#!' is equivalent on
                # Python 2).
                if self.filesystem.read_binary_file(source_path)[:2] == b'#!':
                    self.filesystem.make_executable(dest_path)

        return dest_path.replace(self._webkit_root, '')

    @staticmethod
    def should_try_to_convert(file_to_copy, source_path, dest_dir):
        """Checks whether we should try to modify the file when importing."""
        if file_to_copy.get('is_jstest', False):
            return False

        # Conversion is not necessary for any tests in wpt now; see http://crbug.com/654081.
        # Note, we want to move away from converting files, see http://crbug.com/663773.
        if re.search(r'[/\\]imported[/\\]wpt[/\\]', dest_dir):
            return False

        # Only HTML, XHTML and CSS files should be converted.
        mimetype, _ = mimetypes.guess_type(source_path)
        return mimetype in ('text/html', 'application/xhtml+xml', 'text/css')

    def path_too_long(self, source_path):
        """Checks whether a source path is too long to import.

        Args:
            Absolute path of file to be imported.

        Returns:
            True if the path is too long to import, False if it's OK.
        """
        path_from_repo_base = os.path.relpath(source_path, self.source_repo_path)
        return len(path_from_repo_base) > MAX_PATH_LENGTH
class TestImporter(object):
    """Legacy importer for W3C tests based on os/shutil rather than the
    Host filesystem abstraction.

    Walks a directory of a W3C test repository clone, classifies files as
    tests, reference files, or support files, converts HTML/XML/CSS files
    for WebKit where needed, and copies everything under
    LayoutTests/<destination>/<repo-name>.
    """

    def __init__(self, host, dir_to_import, top_of_repo, options):
        """Initializes the importer.

        Args:
            host: A Host object providing filesystem/executive services.
            dir_to_import: Absolute path of the directory to import from.
            top_of_repo: Absolute path of the repository root (may be an
                ancestor of dir_to_import).
            options: Parsed command-line options (destination, overwrite,
                dry_run, ignore_expectations, ...).
        """
        self.host = host
        self.dir_to_import = dir_to_import
        self.top_of_repo = top_of_repo
        self.options = options

        self.filesystem = self.host.filesystem
        self.webkit_finder = WebKitFinder(self.filesystem)
        self._webkit_root = self.webkit_finder.webkit_base()
        self.layout_tests_dir = self.webkit_finder.path_from_webkit_base("LayoutTests")
        self.destination_directory = self.filesystem.normpath(
            self.filesystem.join(self.layout_tests_dir, options.destination, self.filesystem.basename(self.top_of_repo))
        )
        # When importing in place, skipped paths are pruned from the source
        # tree itself instead of merely being skipped during the copy.
        self.import_in_place = self.dir_to_import == self.destination_directory
        self.dir_above_repo = self.filesystem.dirname(self.top_of_repo)

        # Mercurial changeset recorded for informational purposes only.
        self.changeset = CHANGESET_NOT_AVAILABLE

        self.import_list = []

    def do_import(self):
        """Runs the full import: scan, record the changeset, then copy."""
        _log.info("Importing %s into %s", self.dir_to_import, self.destination_directory)
        self.find_importable_tests(self.dir_to_import)
        self.load_changeset()
        self.import_tests()

    def load_changeset(self):
        """Returns the current changeset from mercurial or "Not Available"."""
        try:
            self.changeset = self.host.executive.run_command(["hg", "tip"]).split("changeset:")[1]
        except (OSError, ScriptError):
            # hg is missing or not a mercurial checkout; proceed without it.
            self.changeset = CHANGESET_NOT_AVAILABLE

    def find_importable_tests(self, directory):
        """Walks |directory| and fills self.import_list with files to copy.

        Each entry records the source directory, a per-file copy list, and
        counts of reftests, JS tests, and total tests found there.
        """
        # FIXME: use filesystem
        paths_to_skip = self.find_paths_to_skip()

        for root, dirs, files in os.walk(directory):
            cur_dir = root.replace(self.dir_above_repo + "/", "") + "/"
            _log.info("  scanning " + cur_dir + "...")
            total_tests = 0
            reftests = 0
            jstests = 0

            # Mutating `dirs` in place prunes subtrees from the walk.
            DIRS_TO_SKIP = (".git", ".hg")
            if dirs:
                for d in DIRS_TO_SKIP:
                    if d in dirs:
                        dirs.remove(d)

                for path in paths_to_skip:
                    path_base = path.replace(self.options.destination + "/", "")
                    path_base = path_base.replace(cur_dir, "")
                    path_full = self.filesystem.join(root, path_base)
                    if path_base in dirs:
                        dirs.remove(path_base)
                        if not self.options.dry_run and self.import_in_place:
                            _log.info("  pruning %s" % path_base)
                            self.filesystem.rmtree(path_full)
                        else:
                            _log.info("  skipping %s" % path_base)

            copy_list = []

            for filename in files:
                path_full = self.filesystem.join(root, filename)
                path_base = path_full.replace(directory + "/", "")
                path_base = self.destination_directory.replace(self.layout_tests_dir + "/", "") + "/" + path_base
                if path_base in paths_to_skip:
                    if not self.options.dry_run and self.import_in_place:
                        _log.info("  pruning %s" % path_base)
                        self.filesystem.remove(path_full)
                        continue
                    else:
                        continue
                # FIXME: This block should really be a separate function, but the early-continues make that difficult.

                if filename.startswith(".") or filename.endswith(".pl"):
                    continue  # For some reason the w3c repo contains random perl scripts we don't care about.

                fullpath = os.path.join(root, filename)

                # Non-document files (not HTML/XHTML/XML) are plain copies.
                mimetype = mimetypes.guess_type(fullpath)
                if (
                    not "html" in str(mimetype[0])
                    and not "application/xhtml+xml" in str(mimetype[0])
                    and not "application/xml" in str(mimetype[0])
                ):
                    copy_list.append({"src": fullpath, "dest": filename})
                    continue

                # Everything in a "resources" directory is a support file.
                if root.endswith("resources"):
                    copy_list.append({"src": fullpath, "dest": filename})
                    continue

                test_parser = TestParser(vars(self.options), filename=fullpath)
                test_info = test_parser.analyze_test()
                if test_info is None:
                    # NOTE(review): unlike the newer importer, unrecognized
                    # documents are dropped here rather than copied.
                    continue

                if "reference" in test_info.keys():
                    reftests += 1
                    total_tests += 1
                    test_basename = os.path.basename(test_info["test"])

                    # Add the ref file, following WebKit style.
                    # FIXME: Ideally we'd support reading the metadata
                    # directly rather than relying  on a naming convention.
                    # Using a naming convention creates duplicate copies of the
                    # reference files.
                    ref_file = os.path.splitext(test_basename)[0] + "-expected"
                    # Make sure to use the extension from the *reference*, not
                    # from the test, because at least flexbox tests use XHTML
                    # references but HTML tests.
                    ref_file += os.path.splitext(test_info["reference"])[1]

                    copy_list.append(
                        {
                            "src": test_info["reference"],
                            "dest": ref_file,
                            "reference_support_info": test_info["reference_support_info"],
                        }
                    )
                    copy_list.append({"src": test_info["test"], "dest": filename})

                elif "jstest" in test_info.keys():
                    jstests += 1
                    total_tests += 1
                    copy_list.append({"src": fullpath, "dest": filename})
                else:
                    total_tests += 1
                    copy_list.append({"src": fullpath, "dest": filename})

            if copy_list:
                # Only add this directory to the list if there's something to import
                self.import_list.append(
                    {
                        "dirname": root,
                        "copy_list": copy_list,
                        "reftests": reftests,
                        "jstests": jstests,
                        "total_tests": total_tests,
                    }
                )

    def find_paths_to_skip(self):
        """Returns the set of paths marked SKIP in W3CImportExpectations.

        Returns an empty set when --ignore-expectations was given. Lines
        with platform specifiers are ignored with a warning.
        """
        if self.options.ignore_expectations:
            return set()

        paths_to_skip = set()
        port = self.host.port_factory.get()
        w3c_import_expectations_path = self.webkit_finder.path_from_webkit_base("LayoutTests", "W3CImportExpectations")
        w3c_import_expectations = self.filesystem.read_text_file(w3c_import_expectations_path)
        parser = TestExpectationParser(port, full_test_list=(), is_lint_mode=False)
        expectation_lines = parser.parse(w3c_import_expectations_path, w3c_import_expectations)
        for line in expectation_lines:
            if "SKIP" in line.expectations:
                if line.specifiers:
                    _log.warning("W3CImportExpectations:%s should not have any specifiers" % line.line_numbers)
                    continue
                paths_to_skip.add(line.name)
        return paths_to_skip

    def import_tests(self):
        """Reads |self.import_list| and converts/copies files into place."""
        total_imported_tests = 0
        total_imported_reftests = 0
        total_imported_jstests = 0
        total_prefixed_properties = {}

        for dir_to_copy in self.import_list:
            total_imported_tests += dir_to_copy["total_tests"]
            total_imported_reftests += dir_to_copy["reftests"]
            total_imported_jstests += dir_to_copy["jstests"]

            prefixed_properties = []

            if not dir_to_copy["copy_list"]:
                continue

            orig_path = dir_to_copy["dirname"]

            subpath = os.path.relpath(orig_path, self.top_of_repo)
            new_path = os.path.join(self.destination_directory, subpath)

            if not (os.path.exists(new_path)):
                os.makedirs(new_path)

            copied_files = []

            for file_to_copy in dir_to_copy["copy_list"]:
                # FIXME: Split this block into a separate function.
                orig_filepath = os.path.normpath(file_to_copy["src"])

                if os.path.isdir(orig_filepath):
                    # FIXME: Figure out what is triggering this and what to do about it.
                    _log.error("%s refers to a directory" % orig_filepath)
                    continue

                if not (os.path.exists(orig_filepath)):
                    _log.warning("%s not found. Possible error in the test.", orig_filepath)
                    continue

                new_filepath = os.path.join(new_path, file_to_copy["dest"])
                if "reference_support_info" in file_to_copy.keys() and file_to_copy["reference_support_info"] != {}:
                    reference_support_info = file_to_copy["reference_support_info"]
                else:
                    reference_support_info = None

                if not (os.path.exists(os.path.dirname(new_filepath))):
                    if not self.import_in_place and not self.options.dry_run:
                        os.makedirs(os.path.dirname(new_filepath))

                relpath = os.path.relpath(new_filepath, self.layout_tests_dir)
                if not self.options.overwrite and os.path.exists(new_filepath):
                    _log.info("  skipping %s" % relpath)
                else:
                    # FIXME: Maybe doing a file diff is in order here for existing files?
                    # In other words, there's no sense in overwriting identical files, but
                    # there's no harm in copying the identical thing.
                    _log.info("  %s" % relpath)

                # Only html, xml, or css should be converted
                # FIXME: Eventually, so should js when support is added for this type of conversion
                mimetype = mimetypes.guess_type(orig_filepath)
                if "html" in str(mimetype[0]) or "xml" in str(mimetype[0]) or "css" in str(mimetype[0]):
                    converted_file = convert_for_webkit(
                        new_path, filename=orig_filepath, reference_support_info=reference_support_info
                    )

                    if not converted_file:
                        if not self.import_in_place and not self.options.dry_run:
                            shutil.copyfile(orig_filepath, new_filepath)  # The file was unmodified.
                    else:
                        for prefixed_property in converted_file[0]:
                            total_prefixed_properties.setdefault(prefixed_property, 0)
                            total_prefixed_properties[prefixed_property] += 1

                        prefixed_properties.extend(set(converted_file[0]) - set(prefixed_properties))
                        if not self.options.dry_run:
                            # NOTE(review): opens in binary mode but writes
                            # converted text — presumably Python-2-era code
                            # where str is bytes; verify before running on
                            # Python 3.
                            outfile = open(new_filepath, "wb")
                            outfile.write(converted_file[1])
                            outfile.close()
                else:
                    if not self.import_in_place and not self.options.dry_run:
                        shutil.copyfile(orig_filepath, new_filepath)

                copied_files.append(new_filepath.replace(self._webkit_root, ""))

        _log.info("")
        _log.info("Import complete")
        _log.info("")
        _log.info("IMPORTED %d TOTAL TESTS", total_imported_tests)
        _log.info("Imported %d reftests", total_imported_reftests)
        _log.info("Imported %d JS tests", total_imported_jstests)
        _log.info(
            "Imported %d pixel/manual tests", total_imported_tests - total_imported_jstests - total_imported_reftests
        )
        _log.info("")

        if total_prefixed_properties:
            _log.info("Properties needing prefixes (by count):")
            for prefixed_property in sorted(total_prefixed_properties, key=lambda p: total_prefixed_properties[p]):
                _log.info("  %s: %s", prefixed_property, total_prefixed_properties[prefixed_property])

    def setup_destination_directory(self):
        """ Creates a destination directory that mirrors that of the source directory """

        new_subpath = self.dir_to_import[len(self.top_of_repo) :]

        destination_directory = os.path.join(self.destination_directory, new_subpath)

        if not os.path.exists(destination_directory):
            os.makedirs(destination_directory)

        _log.info("Tests will be imported into: %s", destination_directory)
# Exemple #29
# 0
class TestImporter(object):
    """Imports W3C CSS tests from a local mercurial checkout into LayoutTests.

    The importer scans ``dir_to_import`` for importable tests, converts their
    markup for WebKit where necessary, copies tests (plus -expected references
    and support files) into the destination directory, and records a
    ``w3c-import.log`` in each imported directory so later imports can detect
    files that were removed upstream.
    """

    def __init__(self, host, dir_to_import, top_of_repo, options):
        self.host = host
        self.dir_to_import = dir_to_import
        self.top_of_repo = top_of_repo
        self.options = options

        self.filesystem = self.host.filesystem
        self.webkit_finder = WebKitFinder(self.filesystem)
        self._webkit_root = self.webkit_finder.webkit_base()
        self.layout_tests_dir = self.webkit_finder.path_from_webkit_base(
            'LayoutTests')
        self.destination_directory = self.filesystem.normpath(
            self.filesystem.join(self.layout_tests_dir, options.destination,
                                 self.filesystem.basename(self.top_of_repo)))
        # "In place" means the checkout being imported already lives at the
        # destination, so files are pruned/overwritten rather than copied.
        self.import_in_place = (
            self.dir_to_import == self.destination_directory)

        self.changeset = CHANGESET_NOT_AVAILABLE
        self.test_status = TEST_STATUS_UNKNOWN

        self.import_list = []

    def do_import(self):
        """Runs the full import: scan, record the hg changeset, then copy."""
        _log.info("Importing %s into %s", self.dir_to_import,
                  self.destination_directory)
        self.find_importable_tests(self.dir_to_import)
        self.load_changeset()
        self.import_tests()

    def load_changeset(self):
        """Stores the current changeset from mercurial or "Not Available"."""
        try:
            self.changeset = self.host.executive.run_command(
                ['hg', 'tip']).split('changeset:')[1]
        except (OSError, ScriptError):
            self.changeset = CHANGESET_NOT_AVAILABLE

    def find_importable_tests(self, directory):
        """Walks |directory| and fills self.import_list with one entry per
        directory that contains something to import (tests, refs, support
        files), respecting W3CImportExpectations SKIP entries."""
        # FIXME: use filesystem
        paths_to_skip = self.find_paths_to_skip()

        for root, dirs, files in os.walk(directory):
            cur_dir = root.replace(self.layout_tests_dir + '/', '') + '/'
            _log.info('Scanning ' + cur_dir + '...')
            total_tests = 0
            reftests = 0
            jstests = 0

            # "archive" and "data" dirs are internal csswg things that live in every approved directory.
            # FIXME: skip 'incoming' tests for now, but we should rework the 'test_status' concept and
            # support reading them as well.
            DIRS_TO_SKIP = ('.git', '.hg', 'data', 'archive', 'incoming')
            if dirs:
                for d in DIRS_TO_SKIP:
                    if d in dirs:
                        dirs.remove(d)

                for path in paths_to_skip:
                    path_base = path.replace(cur_dir, '')
                    path_full = self.filesystem.join(root, path_base)
                    if path_base in dirs:
                        dirs.remove(path_base)
                        # When importing in place the skipped directory is
                        # already in the tree, so it must be deleted.
                        if not self.options.dry_run and self.import_in_place:
                            _log.info("Pruning %s" % path_full)
                            self.filesystem.rmtree(path_full)

            copy_list = []

            for filename in files:
                path_full = self.filesystem.join(root, filename)
                path_base = path_full.replace(self.layout_tests_dir + '/', '')
                if path_base in paths_to_skip:
                    if not self.options.dry_run and self.import_in_place:
                        _log.info("Pruning %s" % path_base)
                        self.filesystem.remove(path_full)
                        continue
                # FIXME: This block should really be a separate function, but the early-continues make that difficult.

                if filename.startswith('.') or filename.endswith('.pl'):
                    continue  # For some reason the w3c repo contains random perl scripts we don't care about.

                fullpath = os.path.join(root, filename)

                # Non-markup files (images, fonts, ...) are copied verbatim.
                mimetype = mimetypes.guess_type(fullpath)
                if 'html' not in str(mimetype[0]) and 'xml' not in str(
                        mimetype[0]):
                    copy_list.append({'src': fullpath, 'dest': filename})
                    continue

                if root.endswith('resources'):
                    copy_list.append({'src': fullpath, 'dest': filename})
                    continue

                test_parser = TestParser(vars(self.options), filename=fullpath)
                test_info = test_parser.analyze_test()
                if test_info is None:
                    continue

                if 'reference' in test_info.keys():
                    reftests += 1
                    total_tests += 1
                    test_basename = os.path.basename(test_info['test'])

                    # Add the ref file, following WebKit style.
                    # FIXME: Ideally we'd support reading the metadata
                    # directly rather than relying  on a naming convention.
                    # Using a naming convention creates duplicate copies of the
                    # reference files.
                    ref_file = os.path.splitext(test_basename)[0] + '-expected'
                    ref_file += os.path.splitext(test_basename)[1]

                    copy_list.append({
                        'src': test_info['reference'],
                        'dest': ref_file
                    })
                    copy_list.append({
                        'src': test_info['test'],
                        'dest': filename
                    })

                    # Update any support files that need to move as well to remain relative to the -expected file.
                    if 'refsupport' in test_info.keys():
                        for support_file in test_info['refsupport']:
                            source_file = os.path.join(
                                os.path.dirname(test_info['reference']),
                                support_file)
                            source_file = os.path.normpath(source_file)

                            # Keep the dest as it was
                            to_copy = {
                                'src': source_file,
                                'dest': support_file
                            }

                            # Only add it once
                            if to_copy not in copy_list:
                                copy_list.append(to_copy)
                elif 'jstest' in test_info.keys():
                    jstests += 1
                    total_tests += 1
                    copy_list.append({'src': fullpath, 'dest': filename})
                else:
                    total_tests += 1
                    copy_list.append({'src': fullpath, 'dest': filename})

            if not total_tests:
                # We can skip the support directory if no tests were found.
                if 'support' in dirs:
                    dirs.remove('support')

            if copy_list:
                # Only add this directory to the list if there's something to import
                self.import_list.append({
                    'dirname': root,
                    'copy_list': copy_list,
                    'reftests': reftests,
                    'jstests': jstests,
                    'total_tests': total_tests
                })

    def find_paths_to_skip(self):
        """Returns the set of paths marked SKIP in W3CImportExpectations."""
        if self.options.ignore_expectations:
            return set()

        paths_to_skip = set()
        port = self.host.port_factory.get()
        w3c_import_expectations_path = self.webkit_finder.path_from_webkit_base(
            'LayoutTests', 'W3CImportExpectations')
        w3c_import_expectations = self.filesystem.read_text_file(
            w3c_import_expectations_path)
        parser = TestExpectationParser(port,
                                       full_test_list=(),
                                       is_lint_mode=False)
        expectation_lines = parser.parse(w3c_import_expectations_path,
                                         w3c_import_expectations)
        for line in expectation_lines:
            if 'SKIP' in line.expectations:
                if line.specifiers:
                    _log.warning(
                        "W3CImportExpectations:%s should not have any specifiers"
                        % line.line_numbers)
                    continue
                paths_to_skip.add(line.name)
        return paths_to_skip

    def import_tests(self):
        """Copies everything gathered in self.import_list into the
        destination, converting html/xml/css for WebKit, then logs a summary
        of imported tests and prefixed properties."""
        total_imported_tests = 0
        total_imported_reftests = 0
        total_imported_jstests = 0
        total_prefixed_properties = {}

        for dir_to_copy in self.import_list:
            total_imported_tests += dir_to_copy['total_tests']
            total_imported_reftests += dir_to_copy['reftests']
            total_imported_jstests += dir_to_copy['jstests']

            prefixed_properties = []

            if not dir_to_copy['copy_list']:
                continue

            orig_path = dir_to_copy['dirname']

            subpath = os.path.relpath(orig_path, self.top_of_repo)
            new_path = os.path.join(self.destination_directory, subpath)

            if not os.path.exists(new_path):
                os.makedirs(new_path)

            copied_files = []

            for file_to_copy in dir_to_copy['copy_list']:
                # FIXME: Split this block into a separate function.
                orig_filepath = os.path.normpath(file_to_copy['src'])

                if os.path.isdir(orig_filepath):
                    # FIXME: Figure out what is triggering this and what to do about it.
                    _log.error('%s refers to a directory' % orig_filepath)
                    continue

                if not os.path.exists(orig_filepath):
                    _log.warning('%s not found. Possible error in the test.',
                                 orig_filepath)
                    continue

                new_filepath = os.path.join(new_path, file_to_copy['dest'])

                if not os.path.exists(os.path.dirname(new_filepath)):
                    if not self.import_in_place and not self.options.dry_run:
                        os.makedirs(os.path.dirname(new_filepath))

                if not self.options.overwrite and os.path.exists(new_filepath):
                    _log.info('Skipping import of existing file ' +
                              new_filepath)
                else:
                    # FIXME: Maybe doing a file diff is in order here for existing files?
                    # In other words, there's no sense in overwriting identical files, but
                    # there's no harm in copying the identical thing.
                    _log.info('Importing: %s', orig_filepath)
                    _log.info('       As: %s', new_filepath)

                # Only html, xml, or css should be converted
                # FIXME: Eventually, so should js when support is added for this type of conversion
                mimetype = mimetypes.guess_type(orig_filepath)
                if 'html' in str(mimetype[0]) or 'xml' in str(
                        mimetype[0]) or 'css' in str(mimetype[0]):
                    converted_file = convert_for_webkit(new_path,
                                                        filename=orig_filepath)

                    if not converted_file:
                        if not self.import_in_place and not self.options.dry_run:
                            shutil.copyfile(
                                orig_filepath,
                                new_filepath)  # The file was unmodified.
                    else:
                        for prefixed_property in converted_file[0]:
                            total_prefixed_properties.setdefault(
                                prefixed_property, 0)
                            total_prefixed_properties[prefixed_property] += 1

                        prefixed_properties.extend(
                            set(converted_file[0]) - set(prefixed_properties))
                        if not self.options.dry_run:
                            # 'with' guarantees the handle is closed even if
                            # the write raises.
                            with open(new_filepath, 'wb') as outfile:
                                outfile.write(converted_file[1])
                else:
                    if not self.import_in_place and not self.options.dry_run:
                        shutil.copyfile(orig_filepath, new_filepath)

                copied_files.append(new_filepath.replace(
                    self._webkit_root, ''))

            # NOTE: was 'self.options.import_in_place', but the flag is
            # computed on the importer itself (see __init__ and the checks
            # above); options has no such attribute.
            if not self.import_in_place and not self.options.dry_run:
                self.remove_deleted_files(new_path, copied_files)
                self.write_import_log(new_path, copied_files,
                                      prefixed_properties)

        _log.info('Import complete')

        _log.info('IMPORTED %d TOTAL TESTS', total_imported_tests)
        _log.info('Imported %d reftests', total_imported_reftests)
        _log.info('Imported %d JS tests', total_imported_jstests)
        _log.info(
            'Imported %d pixel/manual tests', total_imported_tests -
            total_imported_jstests - total_imported_reftests)
        _log.info('')
        _log.info('Properties needing prefixes (by count):')
        for prefixed_property in sorted(
                total_prefixed_properties,
                key=lambda p: total_prefixed_properties[p]):
            _log.info('  %s: %s', prefixed_property,
                      total_prefixed_properties[prefixed_property])

    def setup_destination_directory(self):
        """ Creates a destination directory that mirrors that of the source approved or submitted directory """

        self.update_test_status()

        new_subpath = self.dir_to_import[len(self.top_of_repo):]

        destination_directory = os.path.join(self.destination_directory,
                                             new_subpath)

        if not os.path.exists(destination_directory):
            os.makedirs(destination_directory)

        _log.info('Tests will be imported into: %s', destination_directory)

    def update_test_status(self):
        """ Sets the test status to either 'approved' or 'submitted' """

        status = TEST_STATUS_UNKNOWN

        directory_parts = self.dir_to_import.split(os.path.sep)
        for test_status in VALID_TEST_STATUSES:
            if test_status in directory_parts:
                status = test_status

        self.test_status = status

    def remove_deleted_files(self, dir_to_import, new_file_list):
        """Deletes files recorded in the previous w3c-import.log that are no
        longer part of this import (i.e. removed from the W3C repo)."""
        previous_file_list = []

        import_log_file = os.path.join(dir_to_import, 'w3c-import.log')
        if not os.path.exists(import_log_file):
            return

        with open(import_log_file, 'r') as import_log:
            contents = import_log.readlines()

        # The marker written by write_import_log is 'List of files:\n' (with
        # a colon); the old membership test looked for 'List of files\n' and
        # therefore never matched, so stale files were never removed.
        if 'List of files:\n' in contents:
            list_index = contents.index('List of files:\n') + 1
            previous_file_list = [
                filename.strip() for filename in contents[list_index:]
            ]

        deleted_files = set(previous_file_list) - set(new_file_list)
        for deleted_file in deleted_files:
            _log.info('Deleting file removed from the W3C repo: %s',
                      deleted_file)
            deleted_file = os.path.join(self._webkit_root, deleted_file)
            os.remove(deleted_file)

    def write_import_log(self, dir_to_import, file_list, prop_list):
        """Writes w3c-import.log with the changeset, status, prefixed
        properties and the list of imported files."""
        now = datetime.datetime.now()

        with open(os.path.join(dir_to_import, 'w3c-import.log'),
                  'w') as import_log:
            import_log.write(
                'The tests in this directory were imported from the W3C repository.\n'
            )
            import_log.write(
                'Do NOT modify these tests directly in Webkit. Instead, push changes to the W3C CSS repo:\n\n'
            )
            import_log.write('http://hg.csswg.org/test\n\n')
            import_log.write(
                'Then run the Tools/Scripts/import-w3c-tests in Webkit to reimport\n\n'
            )
            import_log.write('Do NOT modify or remove this file\n\n')
            import_log.write(
                '------------------------------------------------------------------------\n'
            )
            import_log.write('Last Import: ' + now.strftime('%Y-%m-%d %H:%M') +
                             '\n')
            import_log.write('W3C Mercurial changeset: ' + self.changeset +
                             '\n')
            import_log.write('Test status at time of import: ' +
                             self.test_status + '\n')
            import_log.write(
                '------------------------------------------------------------------------\n'
            )
            import_log.write('Properties requiring vendor prefixes:\n')
            if prop_list:
                for prop in prop_list:
                    import_log.write(prop + '\n')
            else:
                import_log.write('None\n')
            import_log.write(
                '------------------------------------------------------------------------\n'
            )
            import_log.write('List of files:\n')
            for item in file_list:
                import_log.write(item + '\n')
# Exemple #30
class TestImporter(object):
    """Imports W3C CSS tests from a local mercurial checkout into LayoutTests.

    Scans dir_to_import for importable tests, converts html/xml/css for
    WebKit where needed, copies tests plus -expected references and support
    files into the destination, and writes a w3c-import.log per imported
    directory so a later import can remove files deleted upstream.
    """

    def __init__(self, host, dir_to_import, top_of_repo, options):
        self.host = host
        self.dir_to_import = dir_to_import
        self.top_of_repo = top_of_repo
        self.options = options

        self.filesystem = self.host.filesystem
        self.webkit_finder = WebKitFinder(self.filesystem)
        self._webkit_root = self.webkit_finder.webkit_base()
        self.layout_tests_dir = self.webkit_finder.path_from_webkit_base('LayoutTests')
        self.destination_directory = self.filesystem.normpath(self.filesystem.join(self.layout_tests_dir, options.destination,
                                                                                   self.filesystem.basename(self.top_of_repo)))
        # Importing "in place" means the source checkout already lives at the
        # destination; in that mode files are pruned rather than copied.
        self.import_in_place = (self.dir_to_import == self.destination_directory)

        self.changeset = CHANGESET_NOT_AVAILABLE
        self.test_status = TEST_STATUS_UNKNOWN

        self.import_list = []

    def do_import(self):
        """Runs the full import: scan, record the hg changeset, then copy."""
        _log.info("Importing %s into %s", self.dir_to_import, self.destination_directory)
        self.find_importable_tests(self.dir_to_import)
        self.load_changeset()
        self.import_tests()

    def load_changeset(self):
        """Stores the current changeset from mercurial or "Not Available"."""
        try:
            self.changeset = self.host.executive.run_command(['hg', 'tip']).split('changeset:')[1]
        except (OSError, ScriptError):
            self.changeset = CHANGESET_NOT_AVAILABLE

    def find_importable_tests(self, directory):
        """Walks |directory| and fills self.import_list with one entry per
        directory that has something to import, honoring SKIP expectations."""
        # FIXME: use filesystem
        paths_to_skip = self.find_paths_to_skip()

        for root, dirs, files in os.walk(directory):
            cur_dir = root.replace(self.layout_tests_dir + '/', '') + '/'
            _log.info('  scanning ' + cur_dir + '...')
            total_tests = 0
            reftests = 0
            jstests = 0

            # "archive" and "data" dirs are internal csswg things that live in every approved directory.
            # FIXME: skip 'incoming' tests for now, but we should rework the 'test_status' concept and
            # support reading them as well.
            DIRS_TO_SKIP = ('.git', '.hg', 'data', 'archive', 'incoming')
            if dirs:
                for d in DIRS_TO_SKIP:
                    if d in dirs:
                        dirs.remove(d)

                for path in paths_to_skip:
                    path_base = path.replace(cur_dir, '')
                    path_full = self.filesystem.join(root, path_base)
                    if path_base in dirs:
                        dirs.remove(path_base)
                        # When in place, the skipped directory already exists
                        # in the tree and must be deleted.
                        if not self.options.dry_run and self.import_in_place:
                            _log.info("  pruning %s" % path_base)
                            self.filesystem.rmtree(path_full)

            copy_list = []

            for filename in files:
                path_full = self.filesystem.join(root, filename)
                path_base = path_full.replace(self.layout_tests_dir + '/', '')
                if path_base in paths_to_skip:
                    if not self.options.dry_run and self.import_in_place:
                        _log.info("  pruning %s" % path_base)
                        self.filesystem.remove(path_full)
                        continue
                # FIXME: This block should really be a separate function, but the early-continues make that difficult.

                if filename.startswith('.') or filename.endswith('.pl'):
                    continue  # For some reason the w3c repo contains random perl scripts we don't care about.

                fullpath = os.path.join(root, filename)

                # Non-markup files (images, fonts, ...) are copied verbatim.
                mimetype = mimetypes.guess_type(fullpath)
                if 'html' not in str(mimetype[0]) and 'xml' not in str(mimetype[0]):
                    copy_list.append({'src': fullpath, 'dest': filename})
                    continue

                if root.endswith('resources'):
                    copy_list.append({'src': fullpath, 'dest': filename})
                    continue

                test_parser = TestParser(vars(self.options), filename=fullpath)
                test_info = test_parser.analyze_test()
                if test_info is None:
                    continue

                if 'reference' in test_info.keys():
                    reftests += 1
                    total_tests += 1
                    test_basename = os.path.basename(test_info['test'])

                    # Add the ref file, following WebKit style.
                    # FIXME: Ideally we'd support reading the metadata
                    # directly rather than relying  on a naming convention.
                    # Using a naming convention creates duplicate copies of the
                    # reference files.
                    ref_file = os.path.splitext(test_basename)[0] + '-expected'
                    ref_file += os.path.splitext(test_basename)[1]

                    copy_list.append({'src': test_info['reference'], 'dest': ref_file})
                    copy_list.append({'src': test_info['test'], 'dest': filename})

                    # Update any support files that need to move as well to remain relative to the -expected file.
                    if 'refsupport' in test_info.keys():
                        for support_file in test_info['refsupport']:
                            source_file = os.path.join(os.path.dirname(test_info['reference']), support_file)
                            source_file = os.path.normpath(source_file)

                            # Keep the dest as it was
                            to_copy = {'src': source_file, 'dest': support_file}

                            # Only add it once
                            if to_copy not in copy_list:
                                copy_list.append(to_copy)
                elif 'jstest' in test_info.keys():
                    jstests += 1
                    total_tests += 1
                    copy_list.append({'src': fullpath, 'dest': filename})
                else:
                    total_tests += 1
                    copy_list.append({'src': fullpath, 'dest': filename})

            if not total_tests:
                # We can skip the support directory if no tests were found.
                if 'support' in dirs:
                    dirs.remove('support')

            if copy_list:
                # Only add this directory to the list if there's something to import
                self.import_list.append({'dirname': root, 'copy_list': copy_list,
                    'reftests': reftests, 'jstests': jstests, 'total_tests': total_tests})

    def find_paths_to_skip(self):
        """Returns the set of paths marked SKIP in W3CImportExpectations."""
        if self.options.ignore_expectations:
            return set()

        paths_to_skip = set()
        port = self.host.port_factory.get()
        w3c_import_expectations_path = self.webkit_finder.path_from_webkit_base('LayoutTests', 'W3CImportExpectations')
        w3c_import_expectations = self.filesystem.read_text_file(w3c_import_expectations_path)
        parser = TestExpectationParser(port, full_test_list=(), is_lint_mode=False)
        expectation_lines = parser.parse(w3c_import_expectations_path, w3c_import_expectations)
        for line in expectation_lines:
            if 'SKIP' in line.expectations:
                if line.specifiers:
                    _log.warning("W3CImportExpectations:%s should not have any specifiers" % line.line_numbers)
                    continue
                paths_to_skip.add(line.name)
        return paths_to_skip

    def import_tests(self):
        """Copies everything gathered in self.import_list into the
        destination, converting markup for WebKit, then logs a summary."""
        total_imported_tests = 0
        total_imported_reftests = 0
        total_imported_jstests = 0
        total_prefixed_properties = {}

        for dir_to_copy in self.import_list:
            total_imported_tests += dir_to_copy['total_tests']
            total_imported_reftests += dir_to_copy['reftests']
            total_imported_jstests += dir_to_copy['jstests']

            prefixed_properties = []

            if not dir_to_copy['copy_list']:
                continue

            orig_path = dir_to_copy['dirname']

            subpath = os.path.relpath(orig_path, self.top_of_repo)
            new_path = os.path.join(self.destination_directory, subpath)

            if not os.path.exists(new_path):
                os.makedirs(new_path)

            copied_files = []

            for file_to_copy in dir_to_copy['copy_list']:
                # FIXME: Split this block into a separate function.
                orig_filepath = os.path.normpath(file_to_copy['src'])

                if os.path.isdir(orig_filepath):
                    # FIXME: Figure out what is triggering this and what to do about it.
                    _log.error('%s refers to a directory' % orig_filepath)
                    continue

                if not os.path.exists(orig_filepath):
                    _log.warning('%s not found. Possible error in the test.', orig_filepath)
                    continue

                new_filepath = os.path.join(new_path, file_to_copy['dest'])

                if not os.path.exists(os.path.dirname(new_filepath)):
                    if not self.import_in_place and not self.options.dry_run:
                        os.makedirs(os.path.dirname(new_filepath))

                if not self.options.overwrite and os.path.exists(new_filepath):
                    _log.info('  skipping import of existing file ' + new_filepath)
                else:
                    # FIXME: Maybe doing a file diff is in order here for existing files?
                    # In other words, there's no sense in overwriting identical files, but
                    # there's no harm in copying the identical thing.
                    _log.info('  importing %s', os.path.relpath(new_filepath, self.layout_tests_dir))

                # Only html, xml, or css should be converted
                # FIXME: Eventually, so should js when support is added for this type of conversion
                mimetype = mimetypes.guess_type(orig_filepath)
                if 'html' in str(mimetype[0]) or 'xml' in str(mimetype[0]) or 'css' in str(mimetype[0]):
                    converted_file = convert_for_webkit(new_path, filename=orig_filepath)

                    if not converted_file:
                        if not self.import_in_place and not self.options.dry_run:
                            shutil.copyfile(orig_filepath, new_filepath)  # The file was unmodified.
                    else:
                        for prefixed_property in converted_file[0]:
                            total_prefixed_properties.setdefault(prefixed_property, 0)
                            total_prefixed_properties[prefixed_property] += 1

                        prefixed_properties.extend(set(converted_file[0]) - set(prefixed_properties))
                        if not self.options.dry_run:
                            # 'with' guarantees the handle is closed even if
                            # the write raises.
                            with open(new_filepath, 'wb') as outfile:
                                outfile.write(converted_file[1])
                else:
                    if not self.import_in_place and not self.options.dry_run:
                        shutil.copyfile(orig_filepath, new_filepath)

                copied_files.append(new_filepath.replace(self._webkit_root, ''))

            if not self.import_in_place and not self.options.dry_run:
                self.remove_deleted_files(new_path, copied_files)
                self.write_import_log(new_path, copied_files, prefixed_properties)

        _log.info('')
        _log.info('Import complete')
        _log.info('')
        _log.info('IMPORTED %d TOTAL TESTS', total_imported_tests)
        _log.info('Imported %d reftests', total_imported_reftests)
        _log.info('Imported %d JS tests', total_imported_jstests)
        _log.info('Imported %d pixel/manual tests', total_imported_tests - total_imported_jstests - total_imported_reftests)
        _log.info('')
        _log.info('Properties needing prefixes (by count):')
        for prefixed_property in sorted(total_prefixed_properties, key=lambda p: total_prefixed_properties[p]):
            _log.info('  %s: %s', prefixed_property, total_prefixed_properties[prefixed_property])

    def setup_destination_directory(self):
        """ Creates a destination directory that mirrors that of the source approved or submitted directory """

        self.update_test_status()

        new_subpath = self.dir_to_import[len(self.top_of_repo):]

        destination_directory = os.path.join(self.destination_directory, new_subpath)

        if not os.path.exists(destination_directory):
            os.makedirs(destination_directory)

        _log.info('Tests will be imported into: %s', destination_directory)

    def update_test_status(self):
        """ Sets the test status to either 'approved' or 'submitted' """

        status = TEST_STATUS_UNKNOWN

        directory_parts = self.dir_to_import.split(os.path.sep)
        for test_status in VALID_TEST_STATUSES:
            if test_status in directory_parts:
                status = test_status

        self.test_status = status

    def remove_deleted_files(self, dir_to_import, new_file_list):
        """Deletes files recorded in the previous w3c-import.log that are no
        longer part of this import (i.e. removed from the W3C repo)."""
        previous_file_list = []

        import_log_file = os.path.join(dir_to_import, 'w3c-import.log')
        if not os.path.exists(import_log_file):
            return

        with open(import_log_file, 'r') as import_log:
            contents = import_log.readlines()

        # The marker written by write_import_log is 'List of files:\n' (with
        # a colon); the old membership test looked for 'List of files\n' and
        # therefore never matched, so stale files were never removed.
        if 'List of files:\n' in contents:
            list_index = contents.index('List of files:\n') + 1
            previous_file_list = [filename.strip() for filename in contents[list_index:]]

        deleted_files = set(previous_file_list) - set(new_file_list)
        for deleted_file in deleted_files:
            _log.info('Deleting file removed from the W3C repo: %s', deleted_file)
            deleted_file = os.path.join(self._webkit_root, deleted_file)
            os.remove(deleted_file)

    def write_import_log(self, dir_to_import, file_list, prop_list):
        """Writes w3c-import.log with the changeset, status, prefixed
        properties and the list of imported files."""
        now = datetime.datetime.now()

        with open(os.path.join(dir_to_import, 'w3c-import.log'), 'w') as import_log:
            import_log.write('The tests in this directory were imported from the W3C repository.\n')
            import_log.write('Do NOT modify these tests directly in Webkit. Instead, push changes to the W3C CSS repo:\n\n')
            import_log.write('http://hg.csswg.org/test\n\n')
            import_log.write('Then run the Tools/Scripts/import-w3c-tests in Webkit to reimport\n\n')
            import_log.write('Do NOT modify or remove this file\n\n')
            import_log.write('------------------------------------------------------------------------\n')
            import_log.write('Last Import: ' + now.strftime('%Y-%m-%d %H:%M') + '\n')
            import_log.write('W3C Mercurial changeset: ' + self.changeset + '\n')
            import_log.write('Test status at time of import: ' + self.test_status + '\n')
            import_log.write('------------------------------------------------------------------------\n')
            import_log.write('Properties requiring vendor prefixes:\n')
            if prop_list:
                for prop in prop_list:
                    import_log.write(prop + '\n')
            else:
                import_log.write('None\n')
            import_log.write('------------------------------------------------------------------------\n')
            import_log.write('List of files:\n')
            for item in file_list:
                import_log.write(item + '\n')
# Exemple #31
class TestImporter(object):

    def __init__(self, host, dir_to_import, top_of_repo, options):
        """Set up paths for importing dir_to_import into the LayoutTests tree.

        The destination is <LayoutTests>/<options.destination>/<repo basename>;
        import_in_place is True when the source already lives at the destination.
        """
        self.host = host
        self.dir_to_import = dir_to_import
        self.top_of_repo = top_of_repo
        self.options = options

        fs = host.filesystem
        self.filesystem = fs
        self.webkit_finder = WebKitFinder(fs)
        self._webkit_root = self.webkit_finder.webkit_base()
        self.layout_tests_dir = self.webkit_finder.path_from_webkit_base('LayoutTests')
        repo_basename = fs.basename(top_of_repo)
        self.destination_directory = fs.normpath(
            fs.join(self.layout_tests_dir, options.destination, repo_basename))
        self.import_in_place = (dir_to_import == self.destination_directory)
        self.dir_above_repo = fs.dirname(top_of_repo)

        self.import_list = []

    def do_import(self):
        """Scan dir_to_import for importable tests and copy them into the destination."""
        source = self.dir_to_import
        target = self.destination_directory
        _log.info("Importing %s into %s", source, target)
        self.find_importable_tests(source)
        self.import_tests()

    def find_importable_tests(self, directory):
        """Walk |directory| and populate self.import_list with files to copy.

        For each directory under |directory| this builds a copy_list of
        {'src', 'dest'} entries (tests, reference files and support files)
        along with per-directory counts of reftests, JS tests and total
        tests.  Directories and files matching W3CImportExpectations SKIP
        lines are skipped (or pruned from disk when importing in place).
        """
        paths_to_skip = self.find_paths_to_skip()

        # These are loop-invariant; build them once instead of on every walked
        # directory.
        # Files in 'tools' are not for browser testing (e.g., a script for generating test files).
        # http://testthewebforward.org/docs/test-format-guidelines.html#tools
        DIRS_TO_SKIP = ('.git', 'test-plan', 'tools')

        # Need to copy all files in 'support', including HTML without meta data.
        # http://testthewebforward.org/docs/test-format-guidelines.html#support-files
        DIRS_TO_INCLUDE = ('resources', 'support')

        for root, dirs, files in self.filesystem.walk(directory):
            cur_dir = root.replace(self.dir_above_repo + '/', '') + '/'
            _log.info('  scanning ' + cur_dir + '...')
            total_tests = 0
            reftests = 0
            jstests = 0

            if dirs:
                # Mutating |dirs| in place prunes the walk below these entries.
                for d in DIRS_TO_SKIP:
                    if d in dirs:
                        dirs.remove(d)

                for path in paths_to_skip:
                    path_base = path.replace(self.options.destination + '/', '')
                    path_base = path_base.replace(cur_dir, '')
                    path_full = self.filesystem.join(root, path_base)
                    if path_base in dirs:
                        dirs.remove(path_base)
                        if not self.options.dry_run and self.import_in_place:
                            _log.info("  pruning %s" % path_base)
                            self.filesystem.rmtree(path_full)
                        else:
                            _log.info("  skipping %s" % path_base)

            copy_list = []

            for filename in files:
                fullpath = self.filesystem.join(root, filename)
                # Path of this file relative to the destination root, used to
                # match against W3CImportExpectations entries.
                path_base = fullpath.replace(directory + '/', '')
                path_base = self.destination_directory.replace(self.layout_tests_dir + '/', '') + '/' + path_base
                if path_base in paths_to_skip:
                    if not self.options.dry_run and self.import_in_place:
                        _log.info("  pruning %s" % path_base)
                        self.filesystem.remove(fullpath)
                    continue
                # FIXME: This block should really be a separate function, but the early-continues make that difficult.

                if filename.startswith('.') or filename.endswith('.pl'):
                    continue  # For some reason the w3c repo contains random perl scripts we don't care about.

                # Non-HTML/XML files (guess_type returns (None, ...) too) are
                # copied as-is without parsing.
                mimetype = mimetypes.guess_type(fullpath)
                if 'html' not in str(mimetype[0]) and 'application/xhtml+xml' not in str(mimetype[0]) and 'application/xml' not in str(mimetype[0]):
                    copy_list.append({'src': fullpath, 'dest': filename})
                    continue

                if self.filesystem.basename(root) in DIRS_TO_INCLUDE:
                    copy_list.append({'src': fullpath, 'dest': filename})
                    continue

                test_parser = TestParser(vars(self.options), filename=fullpath)
                test_info = test_parser.analyze_test()
                if test_info is None:
                    continue

                if 'reference' in test_info.keys():
                    reftests += 1
                    total_tests += 1
                    test_basename = self.filesystem.basename(test_info['test'])

                    # Add the ref file, following WebKit style.
                    # FIXME: Ideally we'd support reading the metadata
                    # directly rather than relying  on a naming convention.
                    # Using a naming convention creates duplicate copies of the
                    # reference files.
                    ref_file = self.filesystem.splitext(test_basename)[0] + '-expected'
                    # Make sure to use the extension from the *reference*, not
                    # from the test, because at least flexbox tests use XHTML
                    # references but HTML tests.
                    ref_file += self.filesystem.splitext(test_info['reference'])[1]

                    copy_list.append({'src': test_info['reference'], 'dest': ref_file,
                                      'reference_support_info': test_info['reference_support_info']})
                    copy_list.append({'src': test_info['test'], 'dest': filename})

                elif 'jstest' in test_info.keys():
                    jstests += 1
                    total_tests += 1
                    copy_list.append({'src': fullpath, 'dest': filename})
                else:
                    total_tests += 1
                    copy_list.append({'src': fullpath, 'dest': filename})

            if copy_list:
                # Only add this directory to the list if there's something to import
                self.import_list.append({'dirname': root, 'copy_list': copy_list,
                                         'reftests': reftests, 'jstests': jstests, 'total_tests': total_tests})

    def find_paths_to_skip(self):
        """Return the set of test paths marked SKIP in LayoutTests/W3CImportExpectations.

        Returns an empty set when --ignore-expectations is given.  Lines that
        carry specifiers are warned about and ignored.
        """
        if self.options.ignore_expectations:
            return set()

        skipped = set()
        port = self.host.port_factory.get()
        expectations_path = self.webkit_finder.path_from_webkit_base('LayoutTests', 'W3CImportExpectations')
        expectations_text = self.filesystem.read_text_file(expectations_path)
        parser = TestExpectationParser(port, all_tests=(), is_lint_mode=False)
        for line in parser.parse(expectations_path, expectations_text):
            if 'SKIP' not in line.expectations:
                continue
            if line.specifiers:
                _log.warning("W3CImportExpectations:%s should not have any specifiers" % line.line_numbers)
                continue
            skipped.add(line.name)
        return skipped

    def import_tests(self):
        """Copy every file recorded in self.import_list into the destination tree.

        HTML/XML/CSS files are run through convert_for_webkit() so prefixed
        CSS properties are rewritten for WebKit; all other files are copied
        verbatim.  Respects --dry-run, --overwrite and in-place imports, and
        logs a summary of imported test counts when finished.
        """
        total_imported_tests = 0
        total_imported_reftests = 0
        total_imported_jstests = 0
        # Maps prefixed CSS property name -> number of occurrences across all files.
        total_prefixed_properties = {}

        for dir_to_copy in self.import_list:
            total_imported_tests += dir_to_copy['total_tests']
            total_imported_reftests += dir_to_copy['reftests']
            total_imported_jstests += dir_to_copy['jstests']

            # Prefixed properties seen in this directory; accumulated below but
            # not otherwise consumed in this method.
            prefixed_properties = []

            if not dir_to_copy['copy_list']:
                continue

            orig_path = dir_to_copy['dirname']

            subpath = self.filesystem.relpath(orig_path, self.top_of_repo)
            new_path = self.filesystem.join(self.destination_directory, subpath)

            if not self.filesystem.exists(new_path):
                self.filesystem.maybe_make_directory(new_path)

            copied_files = []

            for file_to_copy in dir_to_copy['copy_list']:
                # FIXME: Split this block into a separate function.
                orig_filepath = self.filesystem.normpath(file_to_copy['src'])

                if self.filesystem.isdir(orig_filepath):
                    # FIXME: Figure out what is triggering this and what to do about it.
                    _log.error('%s refers to a directory' % orig_filepath)
                    continue

                if not self.filesystem.exists(orig_filepath):
                    _log.warning('%s not found. Possible error in the test.', orig_filepath)
                    continue

                if self.path_too_long(orig_filepath):
                    _log.warning('%s skipped (longer than %d chars), to avoid hitting Windows max path length on builders (http://crbug.com/609871).',
                                 orig_filepath, MAX_PATH_LENGTH)
                    continue

                new_filepath = self.filesystem.join(new_path, file_to_copy['dest'])
                if 'reference_support_info' in file_to_copy and file_to_copy['reference_support_info'] != {}:
                    reference_support_info = file_to_copy['reference_support_info']
                else:
                    reference_support_info = None

                if not self.filesystem.exists(self.filesystem.dirname(new_filepath)):
                    if not self.import_in_place and not self.options.dry_run:
                        self.filesystem.maybe_make_directory(self.filesystem.dirname(new_filepath))

                relpath = self.filesystem.relpath(new_filepath, self.layout_tests_dir)
                if not self.options.overwrite and self.filesystem.exists(new_filepath):
                    _log.info('  skipping %s' % relpath)
                else:
                    # FIXME: Maybe doing a file diff is in order here for existing files?
                    # In other words, there's no sense in overwriting identical files, but
                    # there's no harm in copying the identical thing.
                    _log.info('  %s' % relpath)

                # Only html, xml, or css should be converted
                # FIXME: Eventually, so should js when support is added for this type of conversion
                mimetype = mimetypes.guess_type(orig_filepath)
                if 'html' in str(mimetype[0]) or 'xml' in str(mimetype[0]) or 'css' in str(mimetype[0]):
                    converted_file = convert_for_webkit(new_path, filename=orig_filepath,
                                                        reference_support_info=reference_support_info)

                    if not converted_file:
                        if not self.import_in_place and not self.options.dry_run:
                            self.filesystem.copyfile(orig_filepath, new_filepath)  # The file was unmodified.
                    else:
                        for prefixed_property in converted_file[0]:
                            total_prefixed_properties.setdefault(prefixed_property, 0)
                            total_prefixed_properties[prefixed_property] += 1

                        prefixed_properties.extend(set(converted_file[0]) - set(prefixed_properties))
                        if not self.options.dry_run:
                            # Context manager guarantees the handle is closed
                            # even if the write raises.
                            with open(new_filepath, 'wb') as outfile:
                                outfile.write(converted_file[1].encode('utf-8'))
                else:
                    if not self.import_in_place and not self.options.dry_run:
                        self.filesystem.copyfile(orig_filepath, new_filepath)

                copied_files.append(new_filepath.replace(self._webkit_root, ''))

        _log.info('')
        _log.info('Import complete')
        _log.info('')
        _log.info('IMPORTED %d TOTAL TESTS', total_imported_tests)
        _log.info('Imported %d reftests', total_imported_reftests)
        _log.info('Imported %d JS tests', total_imported_jstests)
        _log.info('Imported %d pixel/manual tests', total_imported_tests - total_imported_jstests - total_imported_reftests)
        _log.info('')

        if total_prefixed_properties:
            _log.info('Properties needing prefixes (by count):')
            for prefixed_property in sorted(total_prefixed_properties, key=lambda p: total_prefixed_properties[p]):
                _log.info('  %s: %s', prefixed_property, total_prefixed_properties[prefixed_property])

    def path_too_long(self, source_path):
        """Return True when the repo-relative form of source_path exceeds MAX_PATH_LENGTH.

        Args:
          source_path: absolute path of the file to be imported.
        """
        relative = os.path.relpath(source_path, self.top_of_repo)
        return len(relative) > MAX_PATH_LENGTH

    def setup_destination_directory(self):
        """Create a destination directory mirroring the source directory layout."""
        # Sub-path of dir_to_import below the top of the repository.
        subpath = self.dir_to_import[len(self.top_of_repo):]
        destination = self.filesystem.join(self.destination_directory, subpath)

        if not self.filesystem.exists(destination):
            self.filesystem.maybe_make_directory(destination)

        _log.info('Tests will be imported into: %s', destination)