Example #1
 def test_chdir__notexists(self):
     fs = FileSystem()
     newdir = '/dirdoesnotexist'
     if sys.platform == 'win32':
         newdir = 'c:\\dirdoesnotexist'
     with self.assertRaises(OSError):
         fs.chdir(newdir)
Example #2
 def setUp(self):
     LoggingTestCase.setUp(self)
     # FIXME: This should be a MockFileSystem once TextFileReader is moved entirely on top of FileSystem.
     self.filesystem = FileSystem()
     self._temp_dir = str(self.filesystem.mkdtemp())
     self._processor = TextFileReaderTest.MockProcessor()
     self._file_reader = TextFileReader(self.filesystem, self._processor)
Example #3
    def setUp(self):
        self.executive = Executive()
        self.filesystem = FileSystem()

        self.original_cwd = self.filesystem.getcwd()

        # Set up fresh git repository with one commit.
        self.untracking_checkout_path = self._mkdtemp(
            suffix='-git_unittest_untracking')
        self._run(['git', 'init', self.untracking_checkout_path])

        self._chdir(self.untracking_checkout_path)
        self._set_user_config()
        self._write_text_file('foo_file', 'foo')
        self._run(['git', 'add', 'foo_file'])
        self._run(['git', 'commit', '-am', 'dummy commit'])
        self.untracking_git = Git(cwd=self.untracking_checkout_path,
                                  filesystem=self.filesystem,
                                  executive=self.executive)

        # Then set up a second git repo that tracks the first one.
        self.tracking_git_checkout_path = self._mkdtemp(
            suffix='-git_unittest_tracking')
        self._run([
            'git', 'clone', '--quiet', self.untracking_checkout_path,
            self.tracking_git_checkout_path
        ])
        self._chdir(self.tracking_git_checkout_path)
        self._set_user_config()
        self.tracking_git = Git(cwd=self.tracking_git_checkout_path,
                                filesystem=self.filesystem,
                                executive=self.executive)
Example #4
    def setUp(self):
        self.fs = FileSystem()
        self.setup_generic_test_dir()

        self._this_dir = os.path.dirname(os.path.abspath(__file__))
        self._missing_file = os.path.join(self._this_dir, 'missing_file.py')
        self._this_file = os.path.join(self._this_dir,
                                       'filesystem_unittest.py')
Example #5
 def __init__(self, expectations, port):
     """
     Args:
         expectations: a blinkpy.web_tests.models.test_expectations.TestExpectations object
         port: a blinkpy.web_tests.port.Port object
     """
     self.expectations = expectations
     self.port = port
     # TODO(lpz): Use self.fs everywhere in this class and add tests
     self.fs = FileSystem()
     self.wpt_manifest = self.port.wpt_manifest("external/wpt")
     self.metadata_output_dir = ""
     self.checked_in_metadata_dir = ""
Example #6
    def test_remove_file_with_retry(self):
        RealFileSystemTest._remove_failures = 2

        def remove_with_exception(filename):
            RealFileSystemTest._remove_failures -= 1
            if RealFileSystemTest._remove_failures >= 0:
                try:
                    # WindowsError only exists on Windows; elsewhere
                    # evaluating the bare name raises NameError, and we
                    # fall back to the FileSystem._WindowsError stub.
                    raise WindowsError
                except NameError:
                    raise FileSystem._WindowsError

        fs = FileSystem()
        self.assertTrue(fs.remove('filename', remove_with_exception))
        self.assertEqual(-1, RealFileSystemTest._remove_failures)
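The test above drives the retry logic in FileSystem.remove by injecting a handler that fails twice before succeeding. A minimal sketch of that bounded-retry pattern, assuming a generic OSError-style failure (a hypothetical helper, not the real FileSystem.remove implementation):

    def remove_with_retry(remove_fn, path, retries=3):
        # Try the removal up to |retries| times; re-raise on the last
        # attempt so a persistent failure still surfaces to the caller.
        for attempt in range(retries):
            try:
                remove_fn(path)
                return True
            except OSError:
                if attempt == retries - 1:
                    raise
        return False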
Example #7
def mark_missing_shards(summary_json,
                        input_directories,
                        merged_output_json,
                        fs=None):
    """Merge the contents of one or more results JSONs into a single JSON.

    Args:
        summary_json: swarming summary containing shard info.
        input_directories: A list of dir paths to JSON files that should be merged.
        merged_output_json: A path to a JSON file to which the merged results
            should be written.
        fs: filesystem object - MockFileSystem or FileSystem.
    """
    # summary.json is produced by swarming client.
    if fs is not None:
        filesystem = fs
    else:
        filesystem = FileSystem()

    try:
        with filesystem.open_binary_file_for_reading(summary_json) as f:
            summary = json.load(f)
    except (IOError, ValueError):
        raise MergeFailure('summary_json is missing or cannot be read',
                           summary_json, None)

    missing_shards = []
    _log.debug("Missing shard processing: %s", input_directories)
    for index, result in enumerate(summary['shards']):
        output_path = None
        if result:
            output_path = find_shard_output_path(index, result.get('task_id'),
                                                 input_directories)
            if not output_path:
                missing_shards.append(index)

    if missing_shards:
        # TODO(crbug.com/1111954) - process summary_json along with others
        # so the merged output json can be written once to disk.
        with filesystem.open_binary_file_for_reading(merged_output_json) as f:
            try:
                json_contents_merged = json.load(f)
            except ValueError:
                raise MergeFailure(
                    'Failed to parse JSON from merged output.json',
                    merged_output_json, None)
        json_contents_merged['missing_shards'] = missing_shards

        with filesystem.open_binary_file_for_writing(merged_output_json) as f:
            MergeFilesJSONP.dump_jsonp(f, '', json_contents_merged, '')
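mark_missing_shards touches the filesystem only through open_binary_file_for_reading and open_binary_file_for_writing, so a tiny in-memory stub (hypothetical, standing in for blinkpy's MockFileSystem) is enough to exercise the happy path:

    import io

    class _StubFS(object):
        """In-memory stand-in exposing only the two methods used above."""

        def __init__(self, files):
            self.files = files  # path -> bytes

        def open_binary_file_for_reading(self, path):
            return io.BytesIO(self.files[path])

        def open_binary_file_for_writing(self, path):
            buf = io.BytesIO()
            self.files[path] = buf
            return buf

    # One shard with no result: nothing is marked missing, so the merged
    # output file is never opened for writing.
    stub = _StubFS({'/summary.json': b'{"shards": [null]}'})
    mark_missing_shards('/summary.json', [], '/merged/output.json', fs=stub)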
Example #8
def main():
    logging.basicConfig(
        level=logging.INFO,
        format='[%(asctime)s %(levelname)s %(name)s] %(message)s',
        datefmt='%H:%M:%S')
    parser = argparse.ArgumentParser(description='Blink source mover')
    sub_parsers = parser.add_subparsers()

    update_parser = sub_parsers.add_parser('update')
    update_parser.set_defaults(command='update')
    update_parser.add_argument(
        '--run', dest='run', action='store_true', help='Update file contents')

    move_parser = sub_parsers.add_parser('move')
    move_parser.set_defaults(command='move')
    move_parser.add_argument(
        '--git',
        dest='run_git',
        action='store_true',
        help='Run |git mv| command instead of |mv|.')

    fixbranch_parser = sub_parsers.add_parser('fixbranch')
    fixbranch_parser.set_defaults(command='fixbranch', run=True, run_git=True)

    options = parser.parse_args()
    mover = MoveBlinkSource(FileSystem(), options, get_chromium_src_dir())
    if options.command == 'update':
        mover.update()
    elif options.command == 'move':
        mover.move()
    elif options.command == 'fixbranch':
        mover.fix_branch()
Example #9
def main():
    fs = FileSystem()
    file_pairs = plan_blink_move(fs, sys.argv[1:])
    print('Show renaming plan. It contains files not in the repository.')
    print('<Source path relative to third_party/WebKit> => '
          '<Destination path relative to third_party/blink>')
    for pair in file_pairs:
        print('%s\t=>\t%s' % pair)
Example #10
 def test_chdir(self):
     fs = FileSystem()
     cwd = fs.getcwd()
     newdir = '/'
     if sys.platform == 'win32':
         newdir = 'c:\\'
     fs.chdir(newdir)
     self.assertEqual(fs.getcwd(), newdir)
     fs.chdir(cwd)
Example #11
 def __init__(self, expectations, port):
     """
     Args:
         expectations: a blinkpy.web_tests.models.test_expectations.TestExpectations object
         port: a blinkpy.web_tests.port.Port object
     """
     self.expectations = expectations
     self.port = port
     # TODO(lpz): Use self.fs everywhere in this class and add tests
     self.fs = FileSystem()
     self.wpt_manifest = self.port.wpt_manifest("external/wpt")
     self.metadata_output_dir = ""
     self.checked_in_metadata_dir = ""
     self.process_baselines = True
     self.handle_annotations = True
     self.checked_in_metadata_copied = set()
     self.use_subtest_results = False
Example #12
    def __init__(self, filesystem=None):
        Merger.__init__(self)

        self.filesystem = filesystem or FileSystem()

        # Default to just checking that the file contents match.
        self.add_helper(lambda *args: True, MergeFilesMatchingContents(self.filesystem))
        # Copy the file if it's the only one.
        self.add_helper(lambda _, to_merge: len(to_merge) == 1, MergeFilesOne(self.filesystem))
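add_helper pairs a predicate with a handler; a sketch of the first-match dispatch this registration implies (a hypothetical minimal version, not blinkpy's actual Merger):

    class MiniMerger(object):
        def __init__(self):
            self.helpers = []  # (predicate, handler), in registration order

        def add_helper(self, predicate, handler):
            self.helpers.append((predicate, handler))

        def merge(self, out_path, to_merge):
            # The first helper whose predicate accepts the inputs wins.
            for predicate, handler in self.helpers:
                if predicate(out_path, to_merge):
                    return handler(out_path, to_merge)
            raise ValueError('no helper matched %r' % (to_merge,))

    m = MiniMerger()
    m.add_helper(lambda _, to_merge: len(to_merge) == 1,
                 lambda out, to_merge: to_merge[0])
    assert m.merge('out.txt', ['only.txt']) == 'only.txt'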
Example #13
 def test_walk(self):
     fs = FileSystem()
     with fs.mkdtemp(prefix='filesystem_unittest_') as d:
         self.assertEqual(list(fs.walk(d)), [(d, [], [])])
         new_file = os.path.join(d, 'foo')
         fs.write_text_file(new_file, u'foo')
         self.assertEqual(list(fs.walk(d)), [(d, [], ['foo'])])
         os.remove(new_file)
Example #14
    def test_read_and_write_text_file(self):
        fs = FileSystem()
        text_path = None

        unicode_text_string = u'\u016An\u012Dc\u014Dde\u033D'
        try:
            text_path = tempfile.mktemp(prefix='tree_unittest_')
            file = fs.open_text_file_for_writing(text_path)
            file.write(unicode_text_string)
            file.close()

            file = fs.open_text_file_for_reading(text_path)
            read_text = file.read()
            file.close()

            self.assertEqual(read_text, unicode_text_string)
        finally:
            if text_path and fs.isfile(text_path):
                os.remove(text_path)
Example #15
 def __init__(self):
     self.executable = sys.executable
     self.executive = Executive()
     self.filesystem = FileSystem()
     self.user = User()
     self.platform = PlatformInfo(sys, platform, self.filesystem,
                                  self.executive)
     self.stdin = sys.stdin
     self.stdout = sys.stdout
     self.stderr = sys.stderr
     self.environ = os.environ
Example #16
    def test_read_and_write_file(self):
        fs = FileSystem()
        text_path = None
        binary_path = None

        unicode_text_string = u'\u016An\u012Dc\u014Dde\u033D'
        hex_equivalent = b'\xC5\xAA\x6E\xC4\xAD\x63\xC5\x8D\x64\x65\xCC\xBD'
        try:
            text_path = tempfile.mktemp(prefix='tree_unittest_')
            binary_path = tempfile.mktemp(prefix='tree_unittest_')
            fs.write_text_file(text_path, unicode_text_string)
            contents = fs.read_binary_file(text_path)
            self.assertEqual(contents, hex_equivalent)

            fs.write_binary_file(binary_path, hex_equivalent)
            text_contents = fs.read_text_file(binary_path)
            self.assertEqual(text_contents, unicode_text_string)
        finally:
            if text_path and fs.isfile(text_path):
                os.remove(text_path)
            if binary_path and fs.isfile(binary_path):
                os.remove(binary_path)
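The hex constant is exactly the UTF-8 encoding of the unicode string, which is what makes the round-trip assertions hold; a quick standalone check (Python 3 bytes literal):

    unicode_text_string = u'\u016An\u012Dc\u014Dde\u033D'
    utf8_bytes = b'\xC5\xAA\x6E\xC4\xAD\x63\xC5\x8D\x64\x65\xCC\xBD'
    assert unicode_text_string.encode('utf-8') == utf8_bytes
    assert utf8_bytes.decode('utf-8') == unicode_text_string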
Example #17
    def setUp(self):
        self.executive = Executive()
        self.filesystem = FileSystem()

        self.original_cwd = self.filesystem.getcwd()

        # Set up fresh git repository with one commit.
        self.untracking_checkout_path = self._mkdtemp(
            suffix='-git_unittest_untracking')
        self._run(['git', 'init', self.untracking_checkout_path])

        self._chdir(self.untracking_checkout_path)
        # Explicitly create the default branch instead of relying on
        # init.defaultBranch. We don't use the new --initial-branch flag with
        # `git init` to keep the tests compatible with older versions of git.
        self._run(['git', 'checkout', '-b', 'master'])
        self._set_user_config()
        self._write_text_file('foo_file', 'foo')
        self._run(['git', 'add', 'foo_file'])
        self._run(['git', 'commit', '-am', 'dummy commit'])
        self.untracking_git = Git(
            cwd=self.untracking_checkout_path,
            filesystem=self.filesystem,
            executive=self.executive)

        # Then set up a second git repo that tracks the first one.
        self.tracking_git_checkout_path = self._mkdtemp(
            suffix='-git_unittest_tracking')
        self._run([
            'git', 'clone', '--quiet', self.untracking_checkout_path,
            self.tracking_git_checkout_path
        ])
        self._chdir(self.tracking_git_checkout_path)
        self._set_user_config()
        self.tracking_git = Git(
            cwd=self.tracking_git_checkout_path,
            filesystem=self.filesystem,
            executive=self.executive)
Example #18
    def test_real_code(self):
        # This test makes sure the real (unmocked) code actually works.
        info = PlatformInfo(sys, platform, FileSystem(), Executive())
        self.assertNotEqual(info.os_name, '')
        self.assertNotEqual(info.os_version, '')
        self.assertNotEqual(info.display_name(), '')
        self.assertTrue(info.is_mac() or info.is_win() or info.is_linux()
                        or info.is_freebsd())
        self.assertIsNotNone(info.terminal_width())

        if info.is_linux():
            self.assertIsNotNone(info.linux_distribution())

        if info.is_mac():
            self.assertTrue(info.total_bytes_memory() > 0)
        else:
            self.assertIsNone(info.total_bytes_memory())
Example #19
 def run_pylint(self, path):
     finder = PathFinder(FileSystem())
     executive = Executive()
     env = os.environ.copy()
     env['PYTHONPATH'] = os.pathsep.join([
         get_blink_tools_dir(),
         finder.path_from_blink_source('build', 'scripts'),
         get_blinkpy_thirdparty_dir(),
         finder.path_from_blink_source('bindings', 'scripts'),
         finder.path_from_chromium_base('build', 'android'),
         finder.path_from_chromium_base('third_party', 'catapult', 'devil'),
         finder.path_from_chromium_base('third_party', 'pymock'),
     ])
     return executive.run_command([
         sys.executable,
         finder.path_from_depot_tools_base('pylint.py'),
         '--output-format=parseable',
         '--rcfile=' + finder.path_from_blink_tools('blinkpy', 'pylintrc'),
         path,
     ],
                                  env=env,
                                  error_handler=executive.ignore_error)
Example #20
    def __init__(self,
                 cwd=None,
                 executive=None,
                 filesystem=None,
                 platform=None):
        self._executive = executive or Executive()
        self._filesystem = filesystem or FileSystem()
        self._executable_name = self.find_executable_name(
            self._executive, platform)

        self.cwd = cwd or self._filesystem.abspath(self._filesystem.getcwd())
        if not self.in_working_directory(self.cwd):
            module_directory = self._filesystem.abspath(
                self._filesystem.dirname(
                    self._filesystem.path_to_module(self.__module__)))
            _log.info(
                'The current directory (%s) is not in a git repo, trying directory %s.',
                cwd, module_directory)
            if self.in_working_directory(module_directory):
                self.cwd = module_directory
            else:
                _log.error('Failed to find Git repo for %s or %s', cwd,
                           module_directory)

        self.checkout_root = self.find_checkout_root(self.cwd)
Example #21
    def test_maybe_make_directory__failure(self):
        # FIXME: os.chmod() doesn't work on Windows to set directories
        # as readonly, so we skip this test for now.
        if sys.platform == 'win32':
            return

        fs = FileSystem()
        with fs.mkdtemp(prefix='filesystem_unittest_') as d:
            # Remove write permissions on the parent directory.
            os.chmod(d, stat.S_IRUSR)

            # Now try to create a sub directory - should fail.
            sub_dir = fs.join(d, 'subdir')
            with self.assertRaises(OSError):
                fs.maybe_make_directory(sub_dir)

            # Clean up in case the test failed and we did create the
            # directory.
            if os.path.exists(sub_dir):
                os.rmdir(sub_dir)
Example #22
class WPTMetadataBuilder(object):
    def __init__(self, expectations, port):
        """
        Args:
            expectations: a blinkpy.web_tests.models.test_expectations.TestExpectations object
            port: a blinkpy.web_tests.port.Port object
        """
        self.expectations = expectations
        self.port = port
        # TODO(lpz): Use self.fs everywhere in this class and add tests
        self.fs = FileSystem()
        self.wpt_manifest = self.port.wpt_manifest("external/wpt")
        self.metadata_output_dir = ""
        self.checked_in_metadata_dir = ""
        self.process_baselines = True
        self.handle_annotations = True

    def run(self, args=None):
        """Main entry point to parse flags and execute the script."""
        parser = argparse.ArgumentParser(description=__doc__)
        parser.add_argument(
            "--metadata-output-dir",
            help="The directory to output the metadata files into.")
        parser.add_argument(
            "--checked-in-metadata-dir",
            help="Root directory of any checked-in WPT metadata files to use. "
            "If set, these files will take precedence over legacy expectations "
            "and baselines when both exist for a test.")
        parser.add_argument(
            '-v',
            '--verbose',
            action='store_true',
            help='More verbose logging.')
        parser.add_argument(
            "--process-baselines",
            action="store_true",
            default=True,
            dest="process_baselines",
            help="Whether to translate baseline (-expected.txt) files into WPT "
            "metadata files. This translation is lossy and results in any "
            "subtest being accepted by wptrunner.")
        parser.add_argument("--no-process-baselines",
                            action="store_false",
                            dest="process_baselines")
        parser.add_argument(
            "--handle-annotations",
            action="store_true",
            default=True,
            dest="handle_annotations",
            help="Whether to handle annotations in expectations files. These "
            "are trailing comments that give additional details for how "
            "to translate an expectation into WPT metadata.")
        parser.add_argument("--no-handle-annotations",
                            action="store_false",
                            dest="handle_annotations")
        args = parser.parse_args(args)

        log_level = logging.DEBUG if args.verbose else logging.INFO
        configure_logging(logging_level=log_level, include_time=True)

        self.metadata_output_dir = args.metadata_output_dir
        self.checked_in_metadata_dir = args.checked_in_metadata_dir
        self.process_baselines = args.process_baselines
        self.handle_annotations = args.handle_annotations
        self._build_metadata_and_write()

        return 0

    @staticmethod
    def status_bitmap_to_string(test_status_bitmap):
        statuses = []
        result = ""
        if test_status_bitmap & SUBTEST_FAIL:
            result += "  blink_expect_any_subtest_status: True # wpt_metadata_builder.py\n"

        if test_status_bitmap & HARNESS_ERROR:
            statuses.append("ERROR")
        if test_status_bitmap & TEST_PASS:
            # We need both PASS and OK. Reftests will PASS while testharness
            # tests are OK.
            statuses.append("PASS")
            statuses.append("OK")
        if test_status_bitmap & TEST_FAIL:
            # We need both FAIL and ERROR. Reftests will FAIL while testharness
            # tests have ERRORs.
            statuses.append("FAIL")
            statuses.append("ERROR")
        if test_status_bitmap & TEST_TIMEOUT:
            statuses.append("TIMEOUT")
        if test_status_bitmap & TEST_CRASH:
            statuses.append("CRASH")
        if test_status_bitmap & TEST_PRECONDITION_FAILED:
            statuses.append("PRECONDITION_FAILED")

        if statuses:
            result += "  expected: [%s]\n" % ", ".join(statuses)
        return result

    def _build_metadata_and_write(self):
        """Build the metadata files and write them to disk."""
        if os.path.exists(self.metadata_output_dir):
            _log.debug("Output dir exists, deleting: %s",
                       self.metadata_output_dir)
            import shutil
            shutil.rmtree(self.metadata_output_dir)

        tests_for_metadata = self.get_tests_needing_metadata()
        _log.info("Found %d tests requiring metadata", len(tests_for_metadata))
        for test_name, test_status_bitmap in tests_for_metadata.items():
            filename, file_contents = self.get_metadata_filename_and_contents(
                test_name, test_status_bitmap)
            if not filename or not file_contents:
                continue
            self._write_to_file(filename, file_contents)

        if self.checked_in_metadata_dir and os.path.exists(
                self.checked_in_metadata_dir):
            _log.info("Copying checked-in WPT metadata on top of translated "
                      "files.")
            self._copy_checked_in_metadata()
        else:
            _log.warning("Not using checked-in WPT metadata, path is empty or "
                         "does not exist: %s", self.checked_in_metadata_dir)

        # Finally, output a stamp file with the same name as the output
        # directory. The stamp file is empty; it's only used for its mtime.
        # This makes the GN build system happy (see crbug.com/995112).
        with open(self.metadata_output_dir + ".stamp", "w"):
            pass

    def _copy_checked_in_metadata(self):
        """Copies checked-in metadata files to the metadata output directory."""
        for filename in self.fs.files_under(self.checked_in_metadata_dir):
            # We match any .ini files in the path. This will find .ini files
            # other than just metadata (such as tox.ini), but that is ok
            # since wptrunner will just ignore those.
            if not fnmatch.fnmatch(filename, "*.ini"):
                continue

            # Found a checked-in .ini file. Copy it to the metadata output
            # directory in the same sub-path as where it is checked in.
            # So /checked/in/a/b/c.ini goes to /metadata/out/a/b/c.ini
            output_path = filename.replace(self.checked_in_metadata_dir,
                                           self.metadata_output_dir)
            if not self.fs.exists(self.fs.dirname(output_path)):
                self.fs.maybe_make_directory(self.fs.dirname(output_path))
            _log.debug("Copying %s to %s" % (filename, output_path))
            self.fs.copyfile(filename, output_path)

    def _write_to_file(self, filename, file_contents):
        # Write the contents to the file name
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        # Note that we append to the metadata file in order to allow multiple
        # tests to be present in the same .ini file (ie: for multi-global tests)
        with open(filename, "a") as metadata_file:
            metadata_file.write(file_contents)

    def get_tests_needing_metadata(self):
        """Determines which tests need metadata files.

        This function loops over the tests to be run and checks whether each test
        has an expectation (eg: in TestExpectations) and/or a baseline (ie:
        test-name-expected.txt). The existence of those things will determine
        the information that will be emitted into the test's metadata file.

        Returns:
            A dict. The key is the string test name and the value is an integer
            bitmap of statuses for the test.
        """
        tests_needing_metadata = defaultdict(int)
        for test_name in self.port.tests(paths=["external/wpt"]):
            # First check for expectations. If a test is skipped then we do not
            # look for more statuses
            expectation_line = self.expectations.get_expectations(test_name)
            self._handle_test_with_expectation(test_name, expectation_line,
                                               tests_needing_metadata)
            if self._test_was_skipped(test_name, tests_needing_metadata):
                # Do not consider other statuses if a test is skipped
                continue

            # Check if the test has a baseline
            if self.process_baselines:
                test_baseline = self.port.expected_text(test_name)
                if not test_baseline:
                    continue
                self._handle_test_with_baseline(test_name, test_baseline,
                                                tests_needing_metadata)
        return tests_needing_metadata

    def _handle_test_with_expectation(self, test_name, expectation_line,
                                      status_dict):
        """Handles a single test expectation and updates |status_dict|."""
        test_statuses = expectation_line.results
        annotations = expectation_line.trailing_comments
        if ResultType.Skip in test_statuses:
            # Skips are handled alone, so don't look at any other statuses
            status_dict[test_name] |= SKIP_TEST
            return

        # Guard against the only test_status being Pass (without any
        # annotations); we don't want to create metadata for such a test.
        if (len(test_statuses) == 1 and ResultType.Pass in test_statuses
                and not annotations):
            return

        status_bitmap = 0
        if ResultType.Pass in test_statuses:
            status_bitmap |= TEST_PASS
        if ResultType.Failure in test_statuses:
            status_bitmap |= TEST_FAIL
        if ResultType.Timeout in test_statuses:
            status_bitmap |= TEST_TIMEOUT
        if ResultType.Crash in test_statuses:
            status_bitmap |= TEST_CRASH
        if self.handle_annotations and annotations:
            if "wpt_subtest_failure" in annotations:
                status_bitmap |= SUBTEST_FAIL
            if "wpt_precondition_failed" in annotations:
                status_bitmap |= TEST_PRECONDITION_FAILED
        # Update status bitmap for this test
        status_dict[test_name] |= status_bitmap

    def _test_was_skipped(self, test_name, status_dict):
        """Returns whether |test_name| is marked as skipped in |status_dict|."""
        return test_name in status_dict and (
            status_dict[test_name] & SKIP_TEST)

    def _handle_test_with_baseline(self, test_name, test_baseline,
                                   status_dict):
        """Handles a single test baseline and updates |status_dict|."""
        status_bitmap = 0
        if re.search(r"^(FAIL|NOTRUN|TIMEOUT)", test_baseline, re.MULTILINE):
            status_bitmap |= SUBTEST_FAIL
        if re.search(r"^Harness Error\.", test_baseline, re.MULTILINE):
            status_bitmap |= HARNESS_ERROR
        if status_bitmap > 0:
            status_dict[test_name] |= status_bitmap
        else:
            # Treat this as an error because we don't want it to happen.
            # Either the non-FAIL statuses need to be handled here, or the
            # baseline is all PASS which should just be deleted.
            _log.error("Test %s has a non-FAIL baseline" % test_name)

    def _metadata_filename_from_test_file(self, wpt_test_file):
        """Returns the filename of the metadata (.ini) file for the test.

        Args:
            wpt_test_file: The file on disk that the specified test lives in.
                For multi-global tests this is usually a ".js" file.

        Returns:
            The fully-qualified string path of the metadata file for this test.
        """
        assert "?" not in wpt_test_file
        test_file_parts = wpt_test_file.split("/")
        return os.path.join(self.metadata_output_dir,
                            *test_file_parts) + ".ini"

    def _metadata_inline_test_name_from_test_name(self, wpt_test_name):
        """Returns the test name to use *inside* of a metadata file.

        The inline name inside the metadata file is the logical name of the
        test without any subdirectories.
        For multi-global tests this means that it must have the specific scope
        of the test (eg: worker, window, etc). This name must also include any
        variants that are set.

        Args:
            wpt_test_name: The fully-qualified test name which contains all
                subdirectories as well as scope (for multi-globals), and
                variants.

        Returns:
            The string test name inside of the metadata file.
        """
        # To generate the inline test name we basically want to strip away the
        # subdirectories from the test name, being careful not to accidentally
        # clobber the variant.
        variant_split = wpt_test_name.split("?")
        test_path = variant_split[0]
        test_name_part = test_path.split("/")[-1]
        variant = "?" + variant_split[1] if len(variant_split) == 2 else ""
        return test_name_part + variant
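        # Example (hypothetical input): a fully-qualified name such as
        # "dir/sub/test.worker.html?variant=1" yields the inline name
        # "test.worker.html?variant=1": the directory prefix is stripped
        # while the multi-global scope and the variant are preserved.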

    def get_metadata_filename_and_contents(self,
                                           chromium_test_name,
                                           test_status_bitmap=0):
        """Determines the metadata filename and contents for the specified test.

        The metadata filename is derived from the test name but will differ if
        the expectation is for a single test or for a directory of tests. The
        contents of the metadata file will also differ for those two cases.

        Args:
            chromium_test_name: A Chromium test name from the expectation file,
                which starts with `external/wpt`.
            test_status_bitmap: An integer containing additional data about the
                status, such as enumerating flaky statuses, or whether a test has
                a combination of harness error and subtest failure.

        Returns:
            A pair of strings, the first is the path to the metadata file and
            the second is the contents to write to that file. Or None if the
            test does not need a metadata file.
        """
        # Ignore expectations for non-WPT tests
        if (not chromium_test_name
                or not chromium_test_name.startswith('external/wpt')):
            return None, None

        # Split the test name by directory. We omit the first 2 entries because
        # they are 'external' and 'wpt' and these don't exist in the WPT's test
        # names.
        wpt_test_name_parts = chromium_test_name.split("/")[2:]
        # The WPT test name differs from the Chromium test name in that the WPT
        # name omits `external/wpt`.
        wpt_test_name = "/".join(wpt_test_name_parts)

        # Check if this is a test file or a test directory
        is_test_dir = chromium_test_name.endswith("/")
        metadata_filename = None
        metadata_file_contents = None
        if is_test_dir:
            # A test directory gets one metadata file called __dir__.ini and all
            # tests in that dir are skipped.
            metadata_filename = os.path.join(self.metadata_output_dir,
                                             *wpt_test_name_parts)
            metadata_filename = os.path.join(metadata_filename, "__dir__.ini")
            _log.debug("Creating a dir-wide ini file %s", metadata_filename)

            metadata_file_contents = self._get_dir_disabled_string()
        else:
            # For individual tests, we create one file per test, with the name
            # of the test in the file as well.
            test_file_path = self.wpt_manifest.file_path_for_test_url(
                wpt_test_name)
            if not test_file_path:
                _log.info("Could not find file for test %s, skipping" %
                          wpt_test_name)
                return None, None

            metadata_filename = self._metadata_filename_from_test_file(
                test_file_path)
            _log.debug("Creating a test ini file %s with status_bitmap %s",
                       metadata_filename, test_status_bitmap)
            inline_test_name = self._metadata_inline_test_name_from_test_name(
                wpt_test_name)
            metadata_file_contents = self._get_test_failed_string(
                inline_test_name, test_status_bitmap)

        return metadata_filename, metadata_file_contents

    def _get_dir_disabled_string(self):
        return "disabled: wpt_metadata_builder.py\n"

    def _get_test_disabled_string(self, test_name):
        return "[%s]\n  disabled: wpt_metadata_builder.py\n" % test_name

    def _get_test_failed_string(self, inline_test_name, test_status_bitmap):
        # The contents of the metadata file are two lines:
        # 1. the inline name of the WPT test path inside square brackets. This
        #    name contains the test scope (for multi-globals) and variants.
        # 2. an indented line with the test status and reason.
        result = "[%s]\n" % inline_test_name
        result = "[%s]\n" % inline_test_name

        # A skipped test is a little special in that it doesn't happen along with
        # any other status. So we compare directly against SKIP_TEST and also
        # return right away.
        if test_status_bitmap == SKIP_TEST:
            result += "  disabled: wpt_metadata_builder.py\n"
            return result

        # Other test statuses can exist together. But ensure we have at least one.
        expected_string = self.status_bitmap_to_string(test_status_bitmap)
        if expected_string:
            result += expected_string
        return result
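The class above ORs status constants like TEST_FAIL and SUBTEST_FAIL into a per-test bitmap. A minimal sketch of flag definitions consistent with that usage (hypothetical values; the real constants live in wpt_metadata_builder.py and may differ):

    # Each status gets its own bit so statuses can be combined with |=
    # and queried with &, as the handlers above do.
    SKIP_TEST = 1 << 0
    TEST_PASS = 1 << 1
    TEST_FAIL = 1 << 2
    TEST_TIMEOUT = 1 << 3
    TEST_CRASH = 1 << 4
    TEST_PRECONDITION_FAILED = 1 << 5
    SUBTEST_FAIL = 1 << 6
    HARNESS_ERROR = 1 << 7

    # A flaky test that both failed and timed out:
    bitmap = TEST_FAIL | TEST_TIMEOUT
    assert bitmap & TEST_FAIL
    assert not bitmap & SKIP_TEST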
Example #23
 def __init__(self):
     self.web = Web()
     self.builders = BuilderList.load_default_builder_list(FileSystem())
Example #24
 def __init__(self, tests, is_debug):
     self.executive = Executive()
     self.tests = tests
     self.expected_failure = tests[-1]
     self.is_debug = is_debug
     self.path_finder = PathFinder(FileSystem())
Example #25
    def test_sep(self):
        fs = FileSystem()

        self.assertEqual(fs.sep, os.sep)
        self.assertEqual(fs.join('foo', 'bar'),
                         os.path.join('foo', 'bar'))
Example #26
 def test_read_text_file__missing(self):
     fs = FileSystem()
     with self.assertRaises(IOError):
         fs.read_text_file(self._missing_file)
Example #27
 def test_isdir__true(self):
     fs = FileSystem()
     self.assertTrue(fs.isdir(self._this_dir))
Example #28
 def test_isdir__false(self):
     fs = FileSystem()
     self.assertFalse(fs.isdir(self._this_file))
Example #29
def main(argv):

    parser = argparse.ArgumentParser()
    parser.description = """\
Merges sharded web test results into a single output directory.
"""
    parser.epilog = """\

If a post merge script is given, it will be run on the resulting merged output
directory. The script will be given the arguments plus
'--results_dir <output_directory>'.
"""

    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Output information about merging progress.')

    parser.add_argument(
        '--results-json-override-value',
        nargs=2,
        metavar=('KEY', 'VALUE'),
        default=[],
        action='append',
        help='Override a value in the results JSON file '
        '(e.g. --results-json-override-value layout_test_dirs /tmp/output).')
    parser.add_argument(
        '--results-json-allow-unknown-if-matching',
        action='store_true',
        default=False,
        help='Allow unknown values in the result.json file as long as the '
        'values match across all shards.')

    parser.add_argument('--output-directory',
                        help='Directory to create the merged results in.')
    parser.add_argument(
        '--allow-existing-output-directory',
        action='store_true',
        default=False,
        help='Allow merging results into a directory which already exists.')
    parser.add_argument(
        '--remove-existing-layout-test-results',
        action='store_true',
        default=False,
        help='Remove existing layout test results from the output directory.')
    parser.add_argument('--input-directories',
                        nargs='+',
                        help='Directories to merge the results from.')

    # Swarming Isolated Merge Script API
    # script.py \
    #     --build-properties /s/build.json \
    #     --output-json /tmp/output.json \
    #     --task-output-dir /path/to/task/output/dir \
    #     shard0/output.json \
    #     shard1/output.json
    parser.add_argument(
        '-o',
        '--output-json',
        help='(Swarming Isolated Merge Script API) Output JSON file to create.'
    )
    parser.add_argument(
        '--build-properties',
        help=
        '(Swarming Isolated Merge Script API) Build property JSON file provided by recipes.'
    )
    parser.add_argument(
        '--task-output-dir',
        help=
        '(Swarming Isolated Merge Script API) Directory containing all swarming task results.'
    )
    parser.add_argument(
        '--results-json-override-with-build-property',
        nargs=2,
        metavar=('RESULT_JSON_KEY', 'BUILD_PROPERTY_KEY'),
        default=[],
        action='append',
        help='Override a value in the results JSON file with the value of '
        'the named build property (RESULT_JSON_KEY BUILD_PROPERTY_KEY).')
    parser.add_argument(
        '--summary-json',
        help=
        '(Swarming Isolated Merge Script API) Summary of shard state running on swarming.'
        '(Output of the swarming.py collect --task-summary-json=XXX command.)')

    # Script to run after merging the directories together. Normally used with archive_layout_test_results.py
    # scripts/slave/chromium/archive_layout_test_results.py \
    #     --results-dir /b/rr/tmpIcChUS/w/layout-test-results \
    #     --build-dir /b/rr/tmpIcChUS/w/src/out \
    #     --build-number 3665 \
    #     --builder-name 'WebKit Linux - RandomOrder' \
    #     --gs-bucket gs://chromium-layout-test-archives \
    #     --staging-dir /b/c/chrome_staging \
    #     --slave-utils-gsutil-py-path /b/rr/tmpIcChUS/rw/scripts/slave/.recipe_deps/depot_tools/gsutil.py
    # in dir /b/rr/tmpIcChUS/w
    parser.add_argument(
        '--post-merge-script',
        nargs='*',
        help='Script to call after the results have been merged.')

    # The positional arguments depend on whether we are using the isolated
    # merge script API mode or not.
    parser.add_argument('positional',
                        nargs='*',
                        help='output.json from shards.')

    args = parser.parse_args(argv)
    if args.verbose:
        logging_level = logging.DEBUG
    else:
        logging_level = logging.INFO
    configure_logging(logging_level=logging_level)

    # Map the isolate arguments back to our output / input arguments.
    if args.output_json:
        logging.info('Running with isolated arguments')
        assert args.positional

        # TODO(tansell): Once removed everywhere, these lines can be removed.
    # For now we just check that nobody is supplying arguments we didn't expect.
        if args.results_json_override_with_build_property:
            for result_key, build_prop_key in args.results_json_override_with_build_property:
                assert (result_key, build_prop_key
                        ) in RESULTS_JSON_VALUE_OVERRIDE_WITH_BUILD_PROPERTY, (
                            "%s not in %s" %
                            (result_key,
                             RESULTS_JSON_VALUE_OVERRIDE_WITH_BUILD_PROPERTY))

        if not args.output_directory:
            args.output_directory = os.getcwd()
            args.allow_existing_output_directory = True
            args.remove_existing_layout_test_results = True

        assert not args.input_directories
        args.input_directories = [os.path.dirname(f) for f in args.positional]
        args.positional = []

    # Allow skipping the --input-directories bit, for example,
    #   merge_web_test_results.py -o outputdir shard0 shard1 shard2
    if args.positional and not args.input_directories:
        args.input_directories = args.positional

    if not args.output_directory:
        args.output_directory = tempfile.mkdtemp(
            suffix='_merged_web_test_results')
        args.allow_existing_output_directory = True

    assert args.output_directory
    assert args.input_directories

    results_json_value_overrides = {}
    if args.build_properties:
        build_properties = json.loads(args.build_properties)

        for result_key, build_prop_key in RESULTS_JSON_VALUE_OVERRIDE_WITH_BUILD_PROPERTY:
            if build_prop_key not in build_properties:
                logging.warning(
                    'Required build property key "%s" was not found!',
                    build_prop_key)
                continue
            results_json_value_overrides[result_key] = build_properties[
                build_prop_key]
        logging.debug('results_json_value_overrides: %r',
                      results_json_value_overrides)

    merger = WebTestDirMerger(
        results_json_value_overrides=results_json_value_overrides,
        results_json_allow_unknown_if_matching=args.
        results_json_allow_unknown_if_matching)

    ensure_empty_dir(FileSystem(),
                     args.output_directory,
                     allow_existing=args.allow_existing_output_directory,
                     remove_existing=args.remove_existing_layout_test_results)

    merger.merge(args.output_directory, args.input_directories)

    merged_output_json = os.path.join(args.output_directory, 'output.json')
    if os.path.exists(merged_output_json) and args.output_json:
        # process summary_json to mark missing shards.
        mark_missing_shards(args.summary_json, args.input_directories,
                            merged_output_json)
        logging.debug('Copying output.json from %s to %s', merged_output_json,
                      args.output_json)
        shutil.copyfile(merged_output_json, args.output_json)

    if args.post_merge_script:
        logging.debug('Changing directory to %s', args.output_directory)
        os.chdir(args.output_directory)

        post_script = list(args.post_merge_script)
        post_script += ['--results_dir', args.output_directory]

        logging.info('Running post merge script %r', post_script)
        os.execlp(post_script[0], *post_script)
Example #30
    def test_maybe_make_directory__success(self):
        fs = FileSystem()

        with fs.mkdtemp(prefix='filesystem_unittest_') as base_path:
            sub_path = os.path.join(base_path, 'newdir')
            self.assertFalse(os.path.exists(sub_path))
            self.assertFalse(fs.isdir(sub_path))

            fs.maybe_make_directory(sub_path)
            self.assertTrue(os.path.exists(sub_path))
            self.assertTrue(fs.isdir(sub_path))

            # Make sure we can re-create it.
            fs.maybe_make_directory(sub_path)
            self.assertTrue(os.path.exists(sub_path))
            self.assertTrue(fs.isdir(sub_path))

            # Clean up.
            os.rmdir(sub_path)

        self.assertFalse(os.path.exists(base_path))
        self.assertFalse(fs.isdir(base_path))