class RealFileSystemTest(unittest.TestCase, GenericFileSystemTests):
    def setUp(self):
        self.fs = FileSystem()
        self.setup_generic_test_dir()

        self._this_dir = os.path.dirname(os.path.abspath(__file__))
        self._missing_file = os.path.join(self._this_dir, 'missing_file.py')
        self._this_file = os.path.join(self._this_dir,
                                       'filesystem_unittest.py')

    def tearDown(self):
        self.teardown_generic_test_dir()
        self.fs = None

    def test_chdir(self):
        fs = FileSystem()
        cwd = fs.getcwd()
        newdir = '/'
        if sys.platform == 'win32':
            newdir = 'c:\\'
        fs.chdir(newdir)
        self.assertEqual(fs.getcwd(), newdir)
        fs.chdir(cwd)

    def test_chdir__notexists(self):
        fs = FileSystem()
        newdir = '/dirdoesnotexist'
        if sys.platform == 'win32':
            newdir = 'c:\\dirdoesnotexist'
        with self.assertRaises(OSError):
            fs.chdir(newdir)

    def test_exists__true(self):
        fs = FileSystem()
        self.assertTrue(fs.exists(self._this_file))

    def test_exists__false(self):
        fs = FileSystem()
        self.assertFalse(fs.exists(self._missing_file))

    def test_getcwd(self):
        fs = FileSystem()
        self.assertTrue(fs.exists(fs.getcwd()))

    def test_isdir__true(self):
        fs = FileSystem()
        self.assertTrue(fs.isdir(self._this_dir))

    def test_isdir__false(self):
        fs = FileSystem()
        self.assertFalse(fs.isdir(self._this_file))

    def test_join(self):
        fs = FileSystem()
        self.assertEqual(fs.join('foo', 'bar'), os.path.join('foo', 'bar'))

    def test_listdir(self):
        fs = FileSystem()
        with fs.mkdtemp(prefix='filesystem_unittest_') as d:
            self.assertEqual(fs.listdir(d), [])
            new_file = os.path.join(d, 'foo')
            fs.write_text_file(new_file, u'foo')
            self.assertEqual(fs.listdir(d), ['foo'])
            os.remove(new_file)

    def test_walk(self):
        fs = FileSystem()
        with fs.mkdtemp(prefix='filesystem_unittest_') as d:
            self.assertEqual(list(fs.walk(d)), [(d, [], [])])
            new_file = os.path.join(d, 'foo')
            fs.write_text_file(new_file, u'foo')
            self.assertEqual(list(fs.walk(d)), [(d, [], ['foo'])])
            os.remove(new_file)

    def test_maybe_make_directory__success(self):
        fs = FileSystem()

        with fs.mkdtemp(prefix='filesystem_unittest_') as base_path:
            sub_path = os.path.join(base_path, 'newdir')
            self.assertFalse(os.path.exists(sub_path))
            self.assertFalse(fs.isdir(sub_path))

            fs.maybe_make_directory(sub_path)
            self.assertTrue(os.path.exists(sub_path))
            self.assertTrue(fs.isdir(sub_path))

            # Make sure we can re-create it.
            fs.maybe_make_directory(sub_path)
            self.assertTrue(os.path.exists(sub_path))
            self.assertTrue(fs.isdir(sub_path))

            # Clean up.
            os.rmdir(sub_path)

        self.assertFalse(os.path.exists(base_path))
        self.assertFalse(fs.isdir(base_path))

    def test_maybe_make_directory__failure(self):
        # FIXME: os.chmod() doesn't work on Windows to set directories
        # as readonly, so we skip this test for now.
        if sys.platform == 'win32':
            return

        fs = FileSystem()
        with fs.mkdtemp(prefix='filesystem_unittest_') as d:
            # Remove write permissions on the parent directory.
            os.chmod(d, stat.S_IRUSR)

            # Now try to create a sub directory - should fail.
            sub_dir = fs.join(d, 'subdir')
            with self.assertRaises(OSError):
                fs.maybe_make_directory(sub_dir)

            # Clean up in case the test failed and we did create the
            # directory.
            if os.path.exists(sub_dir):
                os.rmdir(sub_dir)

    def test_read_and_write_text_file(self):
        fs = FileSystem()
        text_path = None

        unicode_text_string = u'\u016An\u012Dc\u014Dde\u033D'
        try:
            text_path = tempfile.mktemp(prefix='tree_unittest_')
            file = fs.open_text_file_for_writing(text_path)
            file.write(unicode_text_string)
            file.close()

            file = fs.open_text_file_for_reading(text_path)
            read_text = file.read()
            file.close()

            self.assertEqual(read_text, unicode_text_string)
        finally:
            if text_path and fs.isfile(text_path):
                os.remove(text_path)

    def test_read_and_write_file(self):
        fs = FileSystem()
        text_path = None
        binary_path = None

        unicode_text_string = u'\u016An\u012Dc\u014Dde\u033D'
        hex_equivalent = '\xC5\xAA\x6E\xC4\xAD\x63\xC5\x8D\x64\x65\xCC\xBD'
        try:
            text_path = tempfile.mktemp(prefix='tree_unittest_')
            binary_path = tempfile.mktemp(prefix='tree_unittest_')
            fs.write_text_file(text_path, unicode_text_string)
            contents = fs.read_binary_file(text_path)
            self.assertEqual(contents, hex_equivalent)

            fs.write_binary_file(binary_path, hex_equivalent)
            text_contents = fs.read_text_file(binary_path)
            self.assertEqual(text_contents, unicode_text_string)
        finally:
            if text_path and fs.isfile(text_path):
                os.remove(text_path)
            if binary_path and fs.isfile(binary_path):
                os.remove(binary_path)

    def test_read_binary_file__missing(self):
        fs = FileSystem()
        with self.assertRaises(IOError):
            fs.read_binary_file(self._missing_file)

    def test_read_text_file__missing(self):
        fs = FileSystem()
        with self.assertRaises(IOError):
            fs.read_text_file(self._missing_file)

    def test_remove_file_with_retry(self):
        RealFileSystemTest._remove_failures = 2

        def remove_with_exception(filename):
            RealFileSystemTest._remove_failures -= 1
            if RealFileSystemTest._remove_failures >= 0:
                try:
                    raise WindowsError
                except NameError:
                    raise FileSystem._WindowsError

        fs = FileSystem()
        self.assertTrue(fs.remove('filename', remove_with_exception))
        self.assertEqual(-1, RealFileSystemTest._remove_failures)

    def test_sep(self):
        fs = FileSystem()

        self.assertEqual(fs.sep, os.sep)
        self.assertEqual(fs.join('foo', 'bar'), os.path.join('foo', 'bar'))

    def test_long_paths(self):
        # This mostly tests UNC paths on Windows for path names > 260 chars.
        # Currently, only makedirs, copyfile, and various open methods are
        # verified to support UNC paths.
        long_path = self.fs.join(self.generic_test_dir, 'x' * 100, 'y' * 100,
                                 'z' * 100)
        self.fs.maybe_make_directory(long_path)
        file1 = self.fs.join(long_path, 'foo')
        file2 = self.fs.join(long_path, 'bar')
        self.fs.write_text_file(file1, 'hello')
        self.fs.copyfile(file1, file2)
        content = self.fs.read_text_file(file2)
        self.fs.remove(file2)  # No exception should be raised.
        self.assertEqual(content, 'hello')
        # (long_path is left in the filesystem and its removal is tested during cleanup.)

        # On Windows, rmtree can handle trees containing long paths as long as
        # the root is not a long path.
        long_path1 = self.fs.join(self.generic_test_dir, 'a' * 100,
                                  'b' * 100 + " 'b")
        long_path2 = self.fs.join(long_path1, 'c' * 100)
        self.fs.maybe_make_directory(long_path2)
        file1 = self.fs.join(long_path2, 'foo')
        self.fs.write_text_file(file1, 'hello')
        if sys.platform == 'win32':
            with self.assertRaises(AssertionError):
                self.fs.rmtree(long_path2, ignore_errors=False)
        else:
            self.fs.rmtree(long_path2, ignore_errors=False)
        self.fs.rmtree(long_path1, ignore_errors=False)


class WPTMetadataBuilder(object):
    def __init__(self, expectations, port):
        """
        Args:
            expectations: a blinkpy.web_tests.models.test_expectations.TestExpectations object
            port: a blinkpy.web_tests.port.Port object
        """
        self.expectations = expectations
        self.port = port
        # TODO(lpz): Use self.fs everywhere in this class and add tests
        self.fs = FileSystem()
        self.wpt_manifest = self.port.wpt_manifest("external/wpt")
        self.metadata_output_dir = ""
        self.checked_in_metadata_dir = ""
        self.process_baselines = True
        self.handle_annotations = True

    def run(self, args=None):
        """Main entry point to parse flags and execute the script."""
        parser = argparse.ArgumentParser(description=__doc__)
        parser.add_argument(
            "--metadata-output-dir",
            help="The directory to output the metadata files into.")
        parser.add_argument(
            "--checked-in-metadata-dir",
            help="Root directory of any checked-in WPT metadata files to use. "
            "If set, these files will take precedence over legacy expectations "
            "and baselines when both exist for a test.")
        parser.add_argument(
            '-v',
            '--verbose',
            action='store_true',
            help='More verbose logging.')
        parser.add_argument(
            "--process-baselines",
            action="store_true",
            default=True,
            dest="process_baselines",
            help="Whether to translate baseline (-expected.txt) files into WPT "
            "metadata files. This translation is lossy and results in any "
            "subtest being accepted by wptrunner.")
        parser.add_argument("--no-process-baselines",
                            action="store_false",
                            dest="process_baselines")
        parser.add_argument(
            "--handle-annotations",
            action="store_true",
            default=True,
            dest="handle_annotations",
            help="Whether to handle annotations in expectations files. These "
            "are trailing comments that give additional details for how "
            "to translate an expectation into WPT metadata.")
        parser.add_argument("--no-handle-annotations",
                            action="store_false",
                            dest="handle_annotations")
        args = parser.parse_args(args)

        log_level = logging.DEBUG if args.verbose else logging.INFO
        configure_logging(logging_level=log_level, include_time=True)

        self.metadata_output_dir = args.metadata_output_dir
        self.checked_in_metadata_dir = args.checked_in_metadata_dir
        self.process_baselines = args.process_baselines
        self.handle_annotations = args.handle_annotations
        self._build_metadata_and_write()

        return 0

    @staticmethod
    def status_bitmap_to_string(test_status_bitmap):
        statuses = []
        result = ""
        if test_status_bitmap & SUBTEST_FAIL:
            result += "  blink_expect_any_subtest_status: True # wpt_metadata_builder.py\n"

        if test_status_bitmap & HARNESS_ERROR:
            statuses.append("ERROR")
        if test_status_bitmap & TEST_PASS:
            # We need both PASS and OK. Reftests will PASS while testharness
            # tests are OK.
            statuses.append("PASS")
            statuses.append("OK")
        if test_status_bitmap & TEST_FAIL:
            # We need both FAIL and ERROR. Reftests will FAIL while testharness
            # tests have ERRORs.
            statuses.append("FAIL")
            statuses.append("ERROR")
        if test_status_bitmap & TEST_TIMEOUT:
            statuses.append("TIMEOUT")
        if test_status_bitmap & TEST_CRASH:
            statuses.append("CRASH")
        if test_status_bitmap & TEST_PRECONDITION_FAILED:
            statuses.append("PRECONDITION_FAILED")

        if statuses:
            result += "  expected: [%s]\n" % ", ".join(statuses)
        return result
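
    # Illustrative example (not part of the original script): for a bitmap of
    # SUBTEST_FAIL | TEST_TIMEOUT, status_bitmap_to_string() would return the
    # two indented lines below, ready to sit under a "[test]" section header:
    #
    #   blink_expect_any_subtest_status: True # wpt_metadata_builder.py
    #   expected: [TIMEOUT]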

    def _build_metadata_and_write(self):
        """Build the metadata files and write them to disk."""
        if os.path.exists(self.metadata_output_dir):
            _log.debug("Output dir exists, deleting: %s",
                       self.metadata_output_dir)
            import shutil
            shutil.rmtree(self.metadata_output_dir)

        tests_for_metadata = self.get_tests_needing_metadata()
        _log.info("Found %d tests requiring metadata", len(tests_for_metadata))
        for test_name, test_status_bitmap in tests_for_metadata.items():
            filename, file_contents = self.get_metadata_filename_and_contents(
                test_name, test_status_bitmap)
            if not filename or not file_contents:
                continue
            self._write_to_file(filename, file_contents)

        if self.checked_in_metadata_dir and os.path.exists(
                self.checked_in_metadata_dir):
            _log.info("Copying checked-in WPT metadata on top of translated "
                      "files.")
            self._copy_checked_in_metadata()
        else:
            _log.warning("Not using checked-in WPT metadata, path is empty or "
                         "does not exist: %s" % self.checked_in_metadata_dir)

        # Finally, output a stamp file with the same name as the output
        # directory. The stamp file is empty; it is only used for its mtime.
        # This makes the GN build system happy (see crbug.com/995112).
        with open(self.metadata_output_dir + ".stamp", "w"):
            pass
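
    # Illustrative example (assumed paths, not from the original script): with
    # --metadata-output-dir=out/Default/wpt_metadata the translated .ini files
    # land under that directory, and an empty out/Default/wpt_metadata.stamp
    # file is written purely so the GN build can track freshness via its mtime.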

    def _copy_checked_in_metadata(self):
        """Copies checked-in metadata files to the metadata output directory."""
        for filename in self.fs.files_under(self.checked_in_metadata_dir):
            # We match any .ini files in the path. This will find .ini files
            # other than just metadata (such as tox.ini), but that is ok
            # since wptrunner will just ignore those.
            if not fnmatch.fnmatch(filename, "*.ini"):
                continue

            # Found a checked-in .ini file. Copy it to the metadata output
            # directory in the same sub-path as where it is checked in.
            # So /checked/in/a/b/c.ini goes to /metadata/out/a/b/c.ini
            output_path = filename.replace(self.checked_in_metadata_dir,
                                           self.metadata_output_dir)
            if not self.fs.exists(self.fs.dirname(output_path)):
                self.fs.maybe_make_directory(self.fs.dirname(output_path))
            _log.debug("Copying %s to %s" % (filename, output_path))
            self.fs.copyfile(filename, output_path)

    def _write_to_file(self, filename, file_contents):
        # Write the contents to the file name
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        # Note that we append to the metadata file in order to allow multiple
        # tests to be present in the same .ini file (ie: for multi-global tests)
        with open(filename, "a") as metadata_file:
            metadata_file.write(file_contents)

    def get_tests_needing_metadata(self):
        """Determines which tests need metadata files.

        This function loops over the tests to be run and checks whether each test
        has an expectation (eg: in TestExpectations) and/or a baseline (ie:
        test-name-expected.txt). The existence of those things will determine
        the information that will be emitted into the test's metadata file.

        Returns:
            A dict. The key is the string test name and the value is an integer
            bitmap of statuses for the test.
        """
        tests_needing_metadata = defaultdict(int)
        for test_name in self.port.tests(paths=["external/wpt"]):
            # First check for expectations. If a test is skipped then we do not
            # look for more statuses
            expectation_line = self.expectations.get_expectations(test_name)
            self._handle_test_with_expectation(test_name, expectation_line,
                                               tests_needing_metadata)
            if self._test_was_skipped(test_name, tests_needing_metadata):
                # Do not consider other statuses if a test is skipped
                continue

            # Check if the test has a baseline
            if self.process_baselines:
                test_baseline = self.port.expected_text(test_name)
                if not test_baseline:
                    continue
                self._handle_test_with_baseline(test_name, test_baseline,
                                                tests_needing_metadata)
        return tests_needing_metadata
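
    # Illustrative shape of the returned mapping (hypothetical test names):
    #
    #   {
    #       "external/wpt/foo/bar.html": TEST_TIMEOUT,
    #       "external/wpt/baz/qux.any.js": TEST_FAIL | SUBTEST_FAIL,
    #   }
    #
    # Keys are Chromium test names; values OR together the status bits used
    # throughout this class (TEST_PASS, TEST_FAIL, SKIP_TEST, SUBTEST_FAIL, ...).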

    def _handle_test_with_expectation(self, test_name, expectation_line,
                                      status_dict):
        """Handles a single test expectation and updates |status_dict|."""
        test_statuses = expectation_line.results
        annotations = expectation_line.trailing_comments
        if ResultType.Skip in test_statuses:
            # Skips are handled alone, so don't look at any other statuses
            status_dict[test_name] |= SKIP_TEST
            return

        # Guard against the only test_status being Pass (without any
        # annotations); we don't want to create metadata for such a test.
        if (len(test_statuses) == 1 and ResultType.Pass in test_statuses
                and not annotations):
            return

        status_bitmap = 0
        if ResultType.Pass in test_statuses:
            status_bitmap |= TEST_PASS
        if ResultType.Failure in test_statuses:
            status_bitmap |= TEST_FAIL
        if ResultType.Timeout in test_statuses:
            status_bitmap |= TEST_TIMEOUT
        if ResultType.Crash in test_statuses:
            status_bitmap |= TEST_CRASH
        if self.handle_annotations and annotations:
            if "wpt_subtest_failure" in annotations:
                status_bitmap |= SUBTEST_FAIL
            if "wpt_precondition_failed" in annotations:
                status_bitmap |= TEST_PRECONDITION_FAILED
        # Update status bitmap for this test
        status_dict[test_name] |= status_bitmap

    def _test_was_skipped(self, test_name, status_dict):
        """Returns whether |test_name| is marked as skipped in |status_dict|."""
        return test_name in status_dict and (
            status_dict[test_name] & SKIP_TEST)

    def _handle_test_with_baseline(self, test_name, test_baseline,
                                   status_dict):
        """Handles a single test baseline and updates |status_dict|."""
        status_bitmap = 0
        if re.search(r"^(FAIL|NOTRUN|TIMEOUT)", test_baseline, re.MULTILINE):
            status_bitmap |= SUBTEST_FAIL
        if re.search(r"^Harness Error\.", test_baseline, re.MULTILINE):
            status_bitmap |= HARNESS_ERROR
        if status_bitmap > 0:
            status_dict[test_name] |= status_bitmap
        else:
            # Treat this as an error because we don't want it to happen.
            # Either the non-FAIL statuses need to be handled here, or the
            # baseline is all PASS which should just be deleted.
            _log.error("Test %s has a non-FAIL baseline" % test_name)

    def _metadata_filename_from_test_file(self, wpt_test_file):
        """Returns the filename of the metadata (.ini) file for the test.

        Args:
            wpt_test_file: The file on disk that the specified test lives in.
                For multi-global tests this is usually a ".js" file.

        Returns:
            The fully-qualified string path of the metadata file for this test.
        """
        assert "?" not in wpt_test_file
        test_file_parts = wpt_test_file.split("/")
        return os.path.join(self.metadata_output_dir,
                            *test_file_parts) + ".ini"
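
    # Illustrative mapping (hypothetical paths): for a wpt_test_file of
    # "dom/events/foo.any.js" and a metadata_output_dir of "/out/metadata",
    # the returned path is "/out/metadata/dom/events/foo.any.js.ini".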

    def _metadata_inline_test_name_from_test_name(self, wpt_test_name):
        """Returns the test name to use *inside* of a metadata file.

        The inline name inside the metadata file is the logical name of the
        test without any subdirectories.
        For multi-global tests this means that it must have the specific scope
        of the test (eg: worker, window, etc). This name must also include any
        variants that are set.

        Args:
            wpt_test_name: The fully-qualified test name which contains all
                subdirectories as well as scope (for multi-globals), and
                variants.

        Returns:
            The string test name inside of the metadata file.
        """
        # To generate the inline test name we basically want to strip away the
        # subdirectories from the test name, being careful not to accidentally
        # clobber the variant.
        variant_split = wpt_test_name.split("?")
        test_path = variant_split[0]
        test_name_part = test_path.split("/")[-1]
        variant = "?" + variant_split[1] if len(variant_split) == 2 else ""
        return test_name_part + variant
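
    # Illustrative example (hypothetical test name): for
    # "dom/events/foo.any.worker.html?variant=1" this returns
    # "foo.any.worker.html?variant=1"; the subdirectories are stripped while
    # the scope suffix and the variant are preserved.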

    def get_metadata_filename_and_contents(self,
                                           chromium_test_name,
                                           test_status_bitmap=0):
        """Determines the metadata filename and contents for the specified test.

        The metadata filename is derived from the test name but will differ if
        the expectation is for a single test or for a directory of tests. The
        contents of the metadata file will also differ for those two cases.

        Args:
            chromium_test_name: A Chromium test name from the expectation file,
                which starts with `external/wpt`.
            test_status_bitmap: An integer containing additional data about the
                status, such as enumerating flaky statuses, or whether a test has
                a combination of harness error and subtest failure.

        Returns:
            A pair of strings, the first is the path to the metadata file and
            the second is the contents to write to that file. Or None if the
            test does not need a metadata file.
        """
        # Ignore expectations for non-WPT tests
        if (not chromium_test_name
                or not chromium_test_name.startswith('external/wpt')):
            return None, None

        # Split the test name by directory. We omit the first two entries
        # because they are 'external' and 'wpt', which do not appear in WPT
        # test names.
        wpt_test_name_parts = chromium_test_name.split("/")[2:]
        # The WPT test name differs from the Chromium test name in that the WPT
        # name omits `external/wpt`.
        wpt_test_name = "/".join(wpt_test_name_parts)

        # Check if this is a test file or a test directory
        is_test_dir = chromium_test_name.endswith("/")
        metadata_filename = None
        metadata_file_contents = None
        if is_test_dir:
            # A test directory gets one metadata file called __dir__.ini and all
            # tests in that dir are skipped.
            metadata_filename = os.path.join(self.metadata_output_dir,
                                             *wpt_test_name_parts)
            metadata_filename = os.path.join(metadata_filename, "__dir__.ini")
            _log.debug("Creating a dir-wide ini file %s", metadata_filename)

            metadata_file_contents = self._get_dir_disabled_string()
        else:
            # For individual tests, we create one file per test, with the name
            # of the test in the file as well.
            test_file_path = self.wpt_manifest.file_path_for_test_url(
                wpt_test_name)
            if not test_file_path:
                _log.info("Could not find file for test %s, skipping" %
                          wpt_test_name)
                return None, None

            metadata_filename = self._metadata_filename_from_test_file(
                test_file_path)
            _log.debug("Creating a test ini file %s with status_bitmap %s",
                       metadata_filename, test_status_bitmap)
            inline_test_name = self._metadata_inline_test_name_from_test_name(
                wpt_test_name)
            metadata_file_contents = self._get_test_failed_string(
                inline_test_name, test_status_bitmap)

        return metadata_filename, metadata_file_contents

    def _get_dir_disabled_string(self):
        return "disabled: wpt_metadata_builder.py\n"

    def _get_test_disabled_string(self, test_name):
        return "[%s]\n  disabled: wpt_metadata_builder.py\n" % test_name

    def _get_test_failed_string(self, inline_test_name, test_status_bitmap):
        # The contents of the metadata file are two lines:
        # 1. the inline name of the WPT test, inside square brackets. This
        #    name contains the test scope (for multi-globals) and variants.
        # 2. an indented line with the test status and reason
        result = "[%s]\n" % inline_test_name

        # A skipped test is a little special in that it doesn't happen along with
        # any other status. So we compare directly against SKIP_TEST and also
        # return right away.
        if test_status_bitmap == SKIP_TEST:
            result += "  disabled: wpt_metadata_builder.py\n"
            return result

        # Other test statuses can exist together. But ensure we have at least one.
        expected_string = self.status_bitmap_to_string(test_status_bitmap)
        if expected_string:
            result += expected_string
        return result
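
    # Illustrative output (hypothetical test): for an inline test name of
    # "foo.any.worker.html?variant=1" and a bitmap of TEST_TIMEOUT, the
    # generated section is:
    #
    #   [foo.any.worker.html?variant=1]
    #     expected: [TIMEOUT]
    #
    # whereas a bitmap equal to SKIP_TEST yields a "disabled:" line instead.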


class BaseWptScriptAdapter(common.BaseIsolatedScriptArgsAdapter):
    """The base class for script adapters that use wptrunner to execute web
    platform tests. This contains any code shared between these scripts, such
    as integrating output with the results viewer. Subclasses contain other
    (usually platform-specific) logic."""

    def __init__(self):
        super(BaseWptScriptAdapter, self).__init__()
        self.fs = FileSystem()
        host = Host()
        self.port = host.port_factory.get()
        self.wpt_manifest = self.port.wpt_manifest("external/wpt")

    def generate_test_output_args(self, output):
        return ['--log-chromium', output]

    def generate_sharding_args(self, total_shards, shard_index):
        return ['--total-chunks=%d' % total_shards,
                # shard_index is 0-based but WPT expects this-chunk to be 1-based
                '--this-chunk=%d' % (shard_index + 1),
                # The default sharding strategy is to shard by directory. But
                # we want to hash each test to determine which shard runs it.
                # This allows running individual directories that have few
                # tests across many shards.
                '--chunk-type=hash']
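
    # Illustrative example: with total_shards=4 and shard_index=0 this returns
    # ['--total-chunks=4', '--this-chunk=1', '--chunk-type=hash'].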

    def do_post_test_run_tasks(self):
        # Move json results into layout-test-results directory
        results_dir = os.path.dirname(self.options.isolated_script_test_output)
        layout_test_results = os.path.join(results_dir, 'layout-test-results')
        if os.path.exists(layout_test_results):
            self.fs.rmtree(layout_test_results)
        self.fs.maybe_make_directory(layout_test_results)

        # Perform post-processing of wptrunner output
        self.process_wptrunner_output()

        self.fs.copyfile(
            self.options.isolated_script_test_output,
            os.path.join(layout_test_results, 'full_results.json'))
        # Create the full_results_jsonp.js file, which is used to load results
        # into the results viewer.
        self.fs.write_text_file(
            os.path.join(layout_test_results, 'full_results_jsonp.js'),
            'ADD_FULL_RESULTS(%s);' % self.fs.read_text_file(
                self.options.isolated_script_test_output))

        # copy layout test results viewer to layout-test-results directory
        self.fs.copyfile(
            os.path.join(WEB_TESTS_DIR, 'fast', 'harness', 'results.html'),
            os.path.join(layout_test_results, 'results.html'))

    def process_wptrunner_output(self):
        """Post-process the output generated by wptrunner.

        This output is a single large json file containing the raw content of
        artifacts, which need to be extracted into their own files and removed
        from the json file (to avoid duplication)."""
        output_json = json.loads(
            self.fs.read_text_file(self.options.isolated_script_test_output))
        test_json = output_json["tests"]
        results_dir = os.path.dirname(self.options.isolated_script_test_output)
        self._process_test_leaves(results_dir, output_json["path_delimiter"],
                                  test_json, "")
        # Write output_json back to the same file after modifying it in memory
        self.fs.write_text_file(self.options.isolated_script_test_output,
                                json.dumps(output_json))

    def _process_test_leaves(self, results_dir, delim, root_node, path_so_far):
        """Finds and processes each test leaf below the specified root.

        This will recursively traverse the trie of results in the json output,
        keeping track of the path to each test and identifying leaves by the
        presence of certain attributes.

        Args:
            results_dir: str path to the dir that results are stored
            delim: str delimiter to be used for test names
            root_node: dict representing the root of the trie we're currently
                looking at
            path_so_far: str the path to the current root_node in the trie
        """
        if "actual" in root_node:
            # Found a leaf, process it
            if "artifacts" not in root_node:
                return
            log_artifact = root_node["artifacts"].pop("log", None)
            if log_artifact:
                artifact_subpath = self._write_log_artifact(
                    test_failures.FILENAME_SUFFIX_ACTUAL,
                    results_dir, path_so_far, log_artifact)
                root_node["artifacts"]["actual_text"] = [artifact_subpath]
                # Try to locate the expected output of this test, if it exists.
                expected_subpath = self._maybe_write_expected_output(
                    results_dir, path_so_far)
                if expected_subpath:
                    root_node["artifacts"]["expected_text"] = [expected_subpath]

            screenshot_artifact = root_node["artifacts"].pop("screenshots",
                                                             None)
            if screenshot_artifact:
                screenshot_paths_dict = self._write_screenshot_artifact(
                    results_dir, path_so_far, screenshot_artifact)
                for screenshot_key, path in screenshot_paths_dict.items():
                    root_node["artifacts"][screenshot_key] = [path]

            crashlog_artifact = root_node["artifacts"].pop("wpt_crash_log",
                                                           None)
            if crashlog_artifact:
                artifact_subpath = self._write_log_artifact(
                    test_failures.FILENAME_SUFFIX_CRASH_LOG,
                    results_dir, path_so_far, crashlog_artifact)

            return

        # We're not at a leaf node, continue traversing the trie.
        for key in root_node:
            # Append the key to the path, separated by the delimiter. However if
            # the path is empty, skip the delimiter to avoid a leading slash in
            # the path.
            new_path = path_so_far + delim + key if path_so_far else key
            self._process_test_leaves(results_dir, delim, root_node[key],
                                      new_path)
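
    # Illustrative shape of the results trie walked above (hypothetical test
    # names, trimmed fields); a leaf is recognized by the presence of "actual":
    #
    #   {
    #       "dom": {
    #           "events": {
    #               "foo.html": {
    #                   "actual": "FAIL",
    #                   "artifacts": {"log": ["..."], "screenshots": ["..."]}
    #               }
    #           }
    #       }
    #   }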

    def _maybe_write_expected_output(self, results_dir, test_name):
        """Attempts to create an expected output artifact for the test.

        The expected output of tests is checked-in to the source tree beside the
        test itself, with a .ini extension. Not all tests have expected output.

        Args:
            results_dir: str path to the dir to write the output to
            test_name: str name of the test to write expected output for

        Returns:
            string path to the artifact file that the expected output was
            written to, relative to the directory that the original output is
            located. Returns None if there is no expected output for this test.
        """
        test_file_subpath = self.wpt_manifest.file_path_for_test_url(test_name)
        if not test_file_subpath:
            # Not all tests in the output have a corresponding test file. This
            # could be print-reftests (which are unsupported by the blinkpy
            # manifest) or .any.js tests (which appear in the output even though
            # they do not actually run - they have corresponding tests like
            # .any.worker.html which are covered here).
            return None

        test_file_path = os.path.join(EXTERNAL_WPT_TESTS_DIR, test_file_subpath)
        expected_ini_path = test_file_path + ".ini"
        if not os.path.exists(expected_ini_path):
            return None

        # This test has checked-in expected output. It needs to be copied to
        # the results viewer directory and renamed from <test>.ini to
        # <test>-expected.txt.
        # Note: Here we read in the checked-in ini file and pass its contents
        # to |_write_log_artifact| to reuse code. This is probably less
        # efficient than just copying, but makes for cleaner code.
        contents = self.fs.read_text_file(expected_ini_path)
        return self._write_log_artifact(test_failures.FILENAME_SUFFIX_EXPECTED,
                                        results_dir, test_name, [contents])

    def _write_log_artifact(self, suffix, results_dir, test_name, log_artifact):
        """Writes a log artifact to disk.

        A log artifact contains some form of output for a test. It is written to
        a txt file with a suffix generated from the log type.

        Args:
            suffix: str suffix of the artifact to write, e.g.
                test_failures.FILENAME_SUFFIX_ACTUAL
            results_dir: str path to the directory that results live in
            test_name: str name of the test that this artifact is for
            log_artifact: list of strings, the log entries for this test from
                the json output.

        Returns:
            string path to the artifact file that the log was written to,
            relative to the directory that the original output is located.
        """
        log_artifact_sub_path = (
            os.path.join("layout-test-results",
                         self.port.output_filename(test_name, suffix, ".txt"))
        )
        log_artifact_full_path = os.path.join(results_dir,
                                              log_artifact_sub_path)
        if not os.path.exists(os.path.dirname(log_artifact_full_path)):
            self.fs.maybe_make_directory(
                os.path.dirname(log_artifact_full_path))
        self.fs.write_text_file(log_artifact_full_path,
                                "\n".join(log_artifact))

        return log_artifact_sub_path

    def _write_screenshot_artifact(self, results_dir, test_name,
                                   screenshot_artifact):
        """Write screenshot artifact to disk.

        The screenshot artifact is a list of strings, each of which has the
        format <url>:<base64-encoded PNG>. Each url-png pair is a screenshot of
        either the test, or one of its refs. We can identify which screenshot is
        for the test by comparing the url piece to the test name.

        Args:
            results_dir: str path to the directory that results live in
            test_name: str name of the test that this artifact is for
            screenshot_artifact: list of strings, each being a url-png pair as
                described above.

        Returns:
             A dict mapping the screenshot key (ie: actual, expected) to the
             path of the file for that screenshot
        """
        result = {}
        for screenshot_pair in screenshot_artifact:
            screenshot_split = screenshot_pair.split(":")
            url = screenshot_split[0]
            # The url produced by wptrunner will have a leading / which we trim
            # away for easier comparison to the test_name below.
            if url.startswith("/"):
                url = url[1:]
            image_bytes = base64.b64decode(screenshot_split[1].strip())

            screenshot_key = "expected_image"
            file_suffix = test_failures.FILENAME_SUFFIX_EXPECTED
            if test_name == url:
                screenshot_key = "actual_image"
                file_suffix = test_failures.FILENAME_SUFFIX_ACTUAL

            screenshot_sub_path = (
                os.path.join("layout-test-results",
                             self.port.output_filename(
                                 test_name, file_suffix, ".png"))
            )
            result[screenshot_key] = screenshot_sub_path

            screenshot_full_path = os.path.join(results_dir,
                                                screenshot_sub_path)
            if not os.path.exists(os.path.dirname(screenshot_full_path)):
                self.fs.maybe_make_directory(
                    os.path.dirname(screenshot_full_path))
            # Note: we are writing raw bytes to this file
            self.fs.write_binary_file(screenshot_full_path, image_bytes)
        return result
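
    # Illustrative input (hypothetical test): for test_name "dom/events/foo.html"
    # a screenshot_artifact entry looks like
    #
    #   "/dom/events/foo.html:iVBORw0KGgo..."
    #
    # and the returned dict maps "actual_image" (the url matches the test name)
    # or "expected_image" (a reference page) to a .png path under
    # layout-test-results/.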