Example #1
def run_amber(
    amber_script_file: Path,
    output_dir: Path,
    dump_image: bool,
    dump_buffer: bool,
    amber_path: Path,
    skip_render: bool = False,
    debug_layers: bool = False,
    icd: Optional[Path] = None,
) -> Path:
    with util.file_open_text(result_util.get_amber_log_path(output_dir),
                             "w") as log_file:
        try:
            gflogging.push_stream_for_logging(log_file)

            run_amber_helper(
                amber_script_file,
                output_dir,
                dump_image,
                dump_buffer,
                amber_path,
                skip_render,
                debug_layers,
                icd,
            )
        finally:
            gflogging.pop_stream_for_logging()

    return output_dir
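Most of the examples in this section share the same shape: open a log file with util.file_open_text, register it with gflogging.push_stream_for_logging, do the work, and unregister it in a finally block with gflogging.pop_stream_for_logging. A minimal sketch of how that pairing could be factored into a reusable context manager follows; this is not gfauto code, and it assumes the modules can be imported as "from gfauto import gflogging, util".

# Sketch only (not part of gfauto): a context manager pairing
# push_stream_for_logging with the matching pop_stream_for_logging,
# mirroring the try/finally pattern used throughout these examples.
from contextlib import contextmanager
from pathlib import Path
from typing import Iterator, TextIO

from gfauto import gflogging, util  # assumed import path


@contextmanager
def logging_to_file(log_path: Path) -> Iterator[TextIO]:
    # Open the log file and mirror all gflogging output into it while the
    # with-block is active.
    with util.file_open_text(log_path, "w") as log_file:
        gflogging.push_stream_for_logging(log_file)
        try:
            yield log_file
        finally:
            # Always unregister the stream, even if the body raised.
            gflogging.pop_stream_for_logging()

With such a helper, the body of run_amber above would reduce to a single "with logging_to_file(result_util.get_amber_log_path(output_dir)):" block around the run_amber_helper call.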
Example #2
def run_amber_on_device(
    amber_script_file: Path,
    output_dir: Path,
    dump_image: bool,
    dump_buffer: bool,
    skip_render: bool = False,
    serial: Optional[str] = None,
) -> Path:

    with file_open_text(result_util.get_amber_log_path(output_dir),
                        "w") as log_file:
        try:
            gflogging.push_stream_for_logging(log_file)

            run_amber_on_device_helper(
                amber_script_file,
                output_dir,
                dump_image,
                dump_buffer,
                skip_render,
                serial,
            )
        finally:
            gflogging.pop_stream_for_logging()

    return output_dir
Example #3
def run_glsl_reduce(
    source_dir: Path,
    name_of_shader_to_reduce: str,
    output_dir: Path,
    binary_manager: binaries_util.BinaryManager,
    preserve_semantics: bool = False,
    extra_args: Optional[List[str]] = None,
) -> Path:

    input_shader_job = source_dir / name_of_shader_to_reduce / test_util.SHADER_JOB

    glsl_reduce_path = util.tool_on_path(
        "glsl-reduce",
        str(
            binary_manager.get_binary_path_by_name(
                "graphicsfuzz-tool").path.parent),
    )

    cmd = [
        str(glsl_reduce_path),
        str(input_shader_job),
        "--output",
        str(output_dir),
    ]

    if preserve_semantics:
        cmd.append("--preserve-semantics")

    if extra_args:
        cmd.extend(extra_args)

    cmd.extend([
        # This ensures the arguments that follow are all positional arguments.
        "--",
        "gfauto_interestingness_test",
        str(source_dir),
        # --override_shader_job requires two parameters to follow; the second will be added by glsl-reduce (the shader.json file).
        "--override_shader_job",
        str(name_of_shader_to_reduce),
    ])

    # Log the reduction.
    with util.file_open_text(output_dir / "command.log", "w") as f:
        gflogging.push_stream_for_logging(f)
        try:
            # The reducer can fail, but it will typically output an exception file, so we can ignore the exit code.
            subprocess_util.run(cmd, verbose=True, check_exit_code=False)
        finally:
            gflogging.pop_stream_for_logging()

    return output_dir
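For orientation, a hypothetical invocation of run_glsl_reduce might look as follows; the paths and the shader job name are placeholders, and the settings/binary manager setup mirrors Example #10 below.

# Hypothetical usage sketch; directory layout and names are placeholders.
settings = settings_util.read_or_create(Path("settings.json"))
binary_manager = binaries_util.get_default_binary_manager(settings)

reduction_output_dir = run_glsl_reduce(
    source_dir=Path("reports") / "report_123" / "source",
    name_of_shader_to_reduce="variant",
    output_dir=Path("reports") / "report_123" / "reduction_work",
    binary_manager=binary_manager,
    preserve_semantics=True,  # adds --preserve-semantics to the glsl-reduce command
)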
Example #4
def main() -> None:
    parser = argparse.ArgumentParser(description="Fuzz")

    parser.add_argument(
        "--settings",
        help="Path to the settings JSON file for this fuzzing instance.",
        default=str(settings_util.DEFAULT_SETTINGS_FILE_PATH),
    )

    parser.add_argument(
        "--iteration_seed",
        help="The seed to use for one fuzzing iteration (useful for reproducing an issue).",
    )

    parser.add_argument(
        "--use_spirv_fuzz",
        help="Do fuzzing using spirv-fuzz, which must be on your PATH.",
        action="store_true",
    )

    parser.add_argument(
        "--force_no_stack_traces",
        help="Continue even if we cannot get stack traces (using catchsegv or cdb).",
        action="store_true",
    )

    parsed_args = parser.parse_args(sys.argv[1:])

    settings_path = Path(parsed_args.settings)
    iteration_seed: Optional[int] = None if parsed_args.iteration_seed is None else int(
        parsed_args.iteration_seed
    )
    use_spirv_fuzz: bool = parsed_args.use_spirv_fuzz
    force_no_stack_traces: bool = parsed_args.force_no_stack_traces

    with util.file_open_text(Path(f"log_{get_random_name()}.txt"), "w") as log_file:
        gflogging.push_stream_for_logging(log_file)
        try:
            main_helper(
                settings_path, iteration_seed, use_spirv_fuzz, force_no_stack_traces
            )
        except settings_util.NoSettingsFile as exception:
            log(str(exception))
        finally:
            gflogging.pop_stream_for_logging()
Example #5
def main() -> None:
    parser = argparse.ArgumentParser(
        description=
        "Fuzz devices using glsl-fuzz and/or spirv-fuzz to generate tests. "
        "By default, repeatedly generates tests using glsl-fuzz. "
        "You can instead specify the number of times each tool will run; "
        "glsl-fuzz runs G times, then spirv-fuzz runs S times, then the pattern repeats. "
        "By default, G=0 and S=0, in which case glsl-fuzz is hardcoded to run. "
        'Each run of glsl-fuzz/spirv-fuzz uses a random "iteration seed", which can be used to replay the invocation of the tool and the steps that follow. ',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    parser.add_argument(
        "--settings",
        help="Path to the settings JSON file for this fuzzing instance.",
        default=str(settings_util.DEFAULT_SETTINGS_FILE_PATH),
    )

    parser.add_argument(
        "--iteration_seed",
        help=
        "The seed to use for one fuzzing iteration (useful for reproducing an issue).",
    )

    parser.add_argument(
        "--glsl_fuzz_iterations",
        metavar="G",
        help=
        "Run glsl-fuzz G times to generate some tests, before moving on to the next tool.",
        action="store",
        default=0,
        type=int,
    )

    parser.add_argument(
        "--spirv_fuzz_iterations",
        metavar="S",
        help=
        "Run spirv-fuzz S times to generate some tests, before moving on to the next tool.",
        action="store",
        default=0,
        type=int,
    )

    parser.add_argument(
        "--allow_no_stack_traces",
        help=
        "Continue even if we cannot get stack traces (using catchsegv or cdb).",
        action="store_true",
    )

    parser.add_argument(
        "--active_device",
        help=
        "Add an active device name, overriding those in the settings.json file. "
        "Ignored when --update_ignored_crash_signatures is passed."
        "Can be used multiple times to add multiple devices. "
        "E.g. --active_device host --active_device host_with_alternative_icd. "
        "This allows sharing a single settings.json file between multiple instances of gfauto_fuzz. "
        "Note that a host_preprocessor device will automatically be added as the first active device, if it is missing. ",
        action="append",
    )

    parser.add_argument(
        "--update_ignored_crash_signatures",
        metavar="GERRIT_COOKIE",
        help=
        "When passed, gfauto will download and run the existing GraphicsFuzz AmberScript tests from Khronos vk-gl-cts on "
        "the active devices listed in the settings.json file. "
        "It will then update the ignored_crash_signatures field for each active device in the settings.json file based on the crash signatures seen. "
        "Requires Git. Requires Khronos membership. Obtain the Gerrit cookie as follows. "
        + download_cts_gf_tests.GERRIT_COOKIE_INSTRUCTIONS,
        action="store",
        default=None,
        type=str,
    )

    parsed_args = parser.parse_args(sys.argv[1:])

    settings_path = Path(parsed_args.settings)
    iteration_seed: Optional[int] = None if parsed_args.iteration_seed is None else int(
        parsed_args.iteration_seed
    )
    glsl_fuzz_iterations: int = parsed_args.glsl_fuzz_iterations
    spirv_fuzz_iterations: int = parsed_args.spirv_fuzz_iterations
    allow_no_stack_traces: bool = parsed_args.allow_no_stack_traces
    active_device_names: Optional[List[str]] = parsed_args.active_device
    update_ignored_crash_signatures_gerrit_cookie: Optional[str] = (
        parsed_args.update_ignored_crash_signatures)

    # E.g. [GLSL_FUZZ, GLSL_FUZZ, SPIRV_FUZZ] will run glsl-fuzz twice, then spirv-fuzz once, then repeat.
    fuzzing_tool_pattern = get_fuzzing_tool_pattern(
        glsl_fuzz_iterations=glsl_fuzz_iterations,
        spirv_fuzz_iterations=spirv_fuzz_iterations,
    )

    with util.file_open_text(Path(f"log_{get_random_name()}.txt"),
                             "w") as log_file:
        gflogging.push_stream_for_logging(log_file)
        try:
            main_helper(
                settings_path,
                iteration_seed,
                fuzzing_tool_pattern,
                allow_no_stack_traces,
                active_device_names=active_device_names,
                update_ignored_crash_signatures_gerrit_cookie=
                update_ignored_crash_signatures_gerrit_cookie,
            )
        except settings_util.NoSettingsFile as exception:
            log(str(exception))
        finally:
            gflogging.pop_stream_for_logging()
Example #6
def fuzz_glsl(  # pylint: disable=too-many-locals;
    staging_dir: Path,
    reports_dir: Path,
    fuzz_failures_dir: Path,
    active_devices: List[Device],
    references: List[Path],
    donors_dir: Path,
    settings: Settings,
    binary_manager: binaries_util.BinaryManager,
) -> None:
    staging_name = staging_dir.name
    template_source_dir = staging_dir / "source_template"

    # Pick a randomly chosen reference.
    unprepared_reference_shader_job: Path = random.choice(references)

    # The "graphicsfuzz-tool" tool is designed to be on your PATH so that e.g. ".bat" will be appended on Windows.
    # So we use tool_on_path with a custom PATH to get the actual file we want to execute.
    graphicsfuzz_tool_path = util.tool_on_path(
        "graphicsfuzz-tool",
        str(
            binary_manager.get_binary_path_by_name(
                "graphicsfuzz-tool").path.parent),
    )

    try:
        with util.file_open_text(staging_dir / "log.txt", "w") as log_file:
            try:
                gflogging.push_stream_for_logging(log_file)

                # Create the prepared (for Vulkan GLSL) reference.
                glsl_generate_util.run_prepare_reference(
                    graphicsfuzz_tool_path,
                    unprepared_reference_shader_job,
                    template_source_dir / test_util.REFERENCE_DIR /
                    test_util.SHADER_JOB,
                    legacy_graphics_fuzz_vulkan_arg=settings.
                    legacy_graphics_fuzz_vulkan_arg,
                )

                # Generate the variant (GraphicsFuzz requires the unprepared reference as input).
                glsl_generate_util.run_generate(
                    graphicsfuzz_tool_path,
                    unprepared_reference_shader_job,
                    donors_dir,
                    template_source_dir / test_util.VARIANT_DIR /
                    test_util.SHADER_JOB,
                    seed=str(
                        random.getrandbits(
                            glsl_generate_util.GENERATE_SEED_BITS)),
                    other_args=list(settings.extra_graphics_fuzz_generate_args)
                    if settings.extra_graphics_fuzz_generate_args else None,
                    legacy_graphics_fuzz_vulkan_arg=settings.
                    legacy_graphics_fuzz_vulkan_arg,
                )
            finally:
                gflogging.pop_stream_for_logging()
    except subprocess.CalledProcessError:
        util.mkdirs_p(fuzz_failures_dir)
        if len(list(
                fuzz_failures_dir.iterdir())) < settings.maximum_fuzz_failures:
            util.copy_dir(staging_dir, fuzz_failures_dir / staging_dir.name)
        return

    reference_name = unprepared_reference_shader_job.stem

    stable_shader = reference_name.startswith("stable_")

    common_spirv_args = list(settings.common_spirv_args)

    test_dirs = [
        make_test(
            template_source_dir,
            staging_dir / f"{staging_name}_no_opt_test",
            spirv_opt_args=None,
            binary_manager=binary_manager,
            derived_from=reference_name,
            stable_shader=stable_shader,
            common_spirv_args=common_spirv_args,
        ),
        make_test(
            template_source_dir,
            staging_dir / f"{staging_name}_opt_O_test",
            spirv_opt_args=["-O"],
            binary_manager=binary_manager,
            derived_from=reference_name,
            stable_shader=stable_shader,
            common_spirv_args=common_spirv_args,
        ),
    ]

    if not settings.spirv_opt_just_o:
        test_dirs += [
            make_test(
                template_source_dir,
                staging_dir / f"{staging_name}_opt_Os_test",
                spirv_opt_args=["-Os"],
                binary_manager=binary_manager,
                derived_from=reference_name,
                stable_shader=stable_shader,
                common_spirv_args=common_spirv_args,
            ),
            make_test(
                template_source_dir,
                staging_dir / f"{staging_name}_opt_rand1_test",
                spirv_opt_args=spirv_opt_util.random_spirv_opt_args(),
                binary_manager=binary_manager,
                derived_from=reference_name,
                stable_shader=stable_shader,
                common_spirv_args=common_spirv_args,
            ),
            make_test(
                template_source_dir,
                staging_dir / f"{staging_name}_opt_rand2_test",
                spirv_opt_args=spirv_opt_util.random_spirv_opt_args(),
                binary_manager=binary_manager,
                derived_from=reference_name,
                stable_shader=stable_shader,
                common_spirv_args=common_spirv_args,
            ),
            make_test(
                template_source_dir,
                staging_dir / f"{staging_name}_opt_rand3_test",
                spirv_opt_args=spirv_opt_util.random_spirv_opt_args(),
                binary_manager=binary_manager,
                derived_from=reference_name,
                stable_shader=stable_shader,
                common_spirv_args=common_spirv_args,
            ),
        ]

    for test_dir in test_dirs:
        interrupt_util.interrupt_if_needed()
        if handle_test(test_dir, reports_dir, active_devices, binary_manager,
                       settings):
            # If we generated a report, don't bother trying other optimization combinations.
            break
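As a side note, the make_test calls above (and the near-identical ones in Example #8) differ only in the directory suffix and the spirv-opt arguments. A possible refactoring sketch, which is not gfauto code and assumes the same imports and helpers (Path, make_test, spirv_opt_util, binaries_util, Settings) already used by fuzz_glsl:

# Sketch only: build the per-optimization test directories from a table of
# (suffix, spirv-opt args) pairs instead of repeating make_test six times.
from typing import List, Optional, Tuple


def make_opt_variant_tests(
    template_source_dir: Path,
    staging_dir: Path,
    staging_name: str,
    reference_name: str,
    stable_shader: bool,
    common_spirv_args: List[str],
    binary_manager: binaries_util.BinaryManager,
    settings: Settings,
) -> List[Path]:
    opt_configs: List[Tuple[str, Optional[List[str]]]] = [
        ("no_opt", None),
        ("opt_O", ["-O"]),
    ]
    if not settings.spirv_opt_just_o:
        opt_configs += [
            ("opt_Os", ["-Os"]),
            ("opt_rand1", spirv_opt_util.random_spirv_opt_args()),
            ("opt_rand2", spirv_opt_util.random_spirv_opt_args()),
            ("opt_rand3", spirv_opt_util.random_spirv_opt_args()),
        ]
    return [
        make_test(
            template_source_dir,
            staging_dir / f"{staging_name}_{suffix}_test",
            spirv_opt_args=args,
            binary_manager=binary_manager,
            derived_from=reference_name,
            stable_shader=stable_shader,
            common_spirv_args=common_spirv_args,
        )
        for suffix, args in opt_configs
    ]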
Example #7
def run_shader_job(  # pylint: disable=too-many-return-statements,too-many-branches, too-many-locals, too-many-statements;
    source_dir: Path,
    output_dir: Path,
    binary_manager: binaries_util.BinaryManager,
    test: Optional[Test] = None,
    device: Optional[Device] = None,
    ignore_test_and_device_binaries: bool = False,
    shader_job_overrides: Iterable[tool.NameAndShaderJob] = (),
    shader_job_shader_overrides: Optional[
        tool.ShaderJobNameToShaderOverridesMap] = None,
) -> Path:

    if not shader_job_shader_overrides:
        shader_job_shader_overrides = {}

    with util.file_open_text(output_dir / "log.txt", "w") as log_file:
        try:
            gflogging.push_stream_for_logging(log_file)

            # TODO: Find amber path. NDK or host.

            # TODO: If Amber is going to be used, check if Amber can use Vulkan debug layers now, and if not, pass that
            #  info down via a bool.

            if not test:
                test = test_util.metadata_read_from_path(
                    source_dir / test_util.TEST_METADATA)

            if not device:
                device = test.device

            log(f"Running test on device:\n{device.name}")

            # We will create a binary_manager child with a restricted set of binaries so that we only use the binaries
            # specified in the test and by the device; if some required binaries are not specified by the test nor the
            # device, there will be an error instead of falling back to our default binaries. But we keep a reference to
            # the parent so we can still access certain "test-independent" binaries like Amber.

            binary_manager_parent = binary_manager

            if not ignore_test_and_device_binaries:
                binary_manager = binary_manager.get_child_binary_manager(
                    list(device.binaries) + list(test.binaries))

            spirv_opt_hash: Optional[str] = None
            spirv_opt_args: Optional[List[str]] = None
            if test.glsl.spirv_opt_args or test.spirv_fuzz.spirv_opt_args:
                spirv_opt_hash = binary_manager.get_binary_by_name(
                    binaries_util.SPIRV_OPT_NAME).version
                spirv_opt_args = (list(test.glsl.spirv_opt_args)
                                  if test.glsl.spirv_opt_args else list(
                                      test.spirv_fuzz.spirv_opt_args))

            shader_jobs = tool.get_shader_jobs(source_dir,
                                               overrides=shader_job_overrides)

            combined_spirv_shader_jobs: List[tool.SpirvCombinedShaderJob] = []

            for shader_job in shader_jobs:
                try:
                    shader_overrides = shader_job_shader_overrides.get(
                        shader_job.name, None)
                    combined_spirv_shader_jobs.append(
                        tool.compile_shader_job(
                            name=shader_job.name,
                            input_json=shader_job.shader_job,
                            work_dir=output_dir / shader_job.name,
                            binary_paths=binary_manager,
                            spirv_opt_args=spirv_opt_args,
                            shader_overrides=shader_overrides,
                        ))
                except subprocess.CalledProcessError:
                    result_util.write_status(output_dir,
                                             fuzz.STATUS_TOOL_CRASH,
                                             shader_job.name)
                    return output_dir
                except subprocess.TimeoutExpired:
                    result_util.write_status(output_dir,
                                             fuzz.STATUS_TOOL_TIMEOUT,
                                             shader_job.name)
                    return output_dir

            # Device types: |preprocess| and |shader_compiler| don't need an AmberScript file.

            # noinspection PyTypeChecker
            if device.HasField("preprocess"):
                # The "preprocess" device type just needs to get this far, so this is a success.
                result_util.write_status(output_dir, fuzz.STATUS_SUCCESS)
                return output_dir

            # noinspection PyTypeChecker
            if device.HasField("shader_compiler"):
                for combined_spirv_shader_job in combined_spirv_shader_jobs:
                    try:
                        shader_compiler_util.run_shader_job(
                            device.shader_compiler,
                            combined_spirv_shader_job.spirv_shader_job,
                            output_dir,
                            binary_manager=binary_manager,
                        )
                    except subprocess.CalledProcessError:
                        result_util.write_status(
                            output_dir,
                            fuzz.STATUS_CRASH,
                            combined_spirv_shader_job.name,
                        )
                        return output_dir
                    except subprocess.TimeoutExpired:
                        result_util.write_status(
                            output_dir,
                            fuzz.STATUS_TIMEOUT,
                            combined_spirv_shader_job.name,
                        )
                        return output_dir

                # The shader compiler succeeded on all files; this is a success.
                result_util.write_status(output_dir, fuzz.STATUS_SUCCESS)
                return output_dir

            # Other device types need an AmberScript file.

            amber_converter_shader_job_files = [
                amber_converter.ShaderJobFile(
                    name_prefix=combined_spirv_shader_job.name,
                    asm_spirv_shader_job_json=combined_spirv_shader_job.
                    spirv_asm_shader_job,
                    glsl_source_json=combined_spirv_shader_job.
                    glsl_source_shader_job,
                    processing_info="",
                ) for combined_spirv_shader_job in combined_spirv_shader_jobs
            ]

            # Check if the first is the reference shader; if so, pull it out into its own variable.

            reference: Optional[amber_converter.ShaderJobFile] = None
            variants = amber_converter_shader_job_files

            if (amber_converter_shader_job_files[0].name_prefix ==
                    test_util.REFERENCE_DIR):
                reference = amber_converter_shader_job_files[0]
                variants = variants[1:]
            elif len(variants) > 1:
                raise AssertionError(
                    "More than one variant, but no reference. This is unexpected."
                )

            amber_script_file = amber_converter.spirv_asm_shader_job_to_amber_script(
                shader_job_file_amber_test=amber_converter.ShaderJobFileBasedAmberTest(
                    reference_asm_spirv_job=reference,
                    variants_asm_spirv_job=variants,
                ),
                output_amber_script_file_path=output_dir / "test.amber",
                amberfy_settings=amber_converter.AmberfySettings(
                    spirv_opt_args=spirv_opt_args, spirv_opt_hash=spirv_opt_hash
                ),
            )

            is_compute = bool(
                shader_job_util.get_related_files(
                    combined_spirv_shader_jobs[0].spirv_shader_job,
                    [shader_job_util.EXT_COMP],
                ))

            # noinspection PyTypeChecker
            if device.HasField("host") or device.HasField("swift_shader"):
                icd: Optional[Path] = None

                # noinspection PyTypeChecker
                if device.HasField("swift_shader"):
                    icd = binary_manager.get_binary_path_by_name(
                        binaries_util.SWIFT_SHADER_NAME).path

                # Run the test on the host using Amber.
                host_device_util.run_amber(
                    amber_script_file,
                    output_dir,
                    amber_path=binary_manager_parent.get_binary_path_by_name(
                        binaries_util.AMBER_NAME).path,
                    dump_image=(not is_compute),
                    dump_buffer=is_compute,
                    icd=icd,
                )
                return output_dir

            # noinspection PyTypeChecker
            if device.HasField("android"):

                android_device.run_amber_on_device(
                    amber_script_file,
                    output_dir,
                    dump_image=(not is_compute),
                    dump_buffer=is_compute,
                    serial=device.android.serial,
                )
                return output_dir

            # TODO: For a remote device (which we will probably need to support), use log_a_file to output the
            #  "amber_log.txt" file.

            raise AssertionError(f"Unhandled device type:\n{str(device)}")

        finally:
            gflogging.pop_stream_for_logging()
Example #8
def fuzz_spirv(  # pylint: disable=too-many-locals;
    staging_dir: Path,
    reports_dir: Path,
    fuzz_failures_dir: Path,
    active_devices: List[Device],
    spirv_fuzz_shaders: List[Path],
    settings: Settings,
    binary_manager: binaries_util.BinaryManager,
) -> None:

    staging_name = staging_dir.name
    template_source_dir = staging_dir / "source_template"

    reference_spirv_shader_job_orig_path: Path = random.choice(
        spirv_fuzz_shaders)

    # Copy in a randomly chosen reference.
    reference_spirv_shader_job = shader_job_util.copy(
        reference_spirv_shader_job_orig_path,
        template_source_dir / test_util.REFERENCE_DIR / test_util.SHADER_JOB,
        language_suffix=shader_job_util.SUFFIXES_SPIRV_FUZZ_INPUT,
    )

    try:
        with util.file_open_text(staging_dir / "log.txt", "w") as log_file:
            try:
                gflogging.push_stream_for_logging(log_file)
                spirv_fuzz_util.run_generate_on_shader_job(
                    binary_manager.get_binary_path_by_name("spirv-fuzz").path,
                    reference_spirv_shader_job,
                    template_source_dir / test_util.VARIANT_DIR /
                    test_util.SHADER_JOB,
                    donor_shader_job_paths=spirv_fuzz_shaders,
                    seed=str(
                        random.getrandbits(
                            spirv_fuzz_util.GENERATE_SEED_BITS)),
                    other_args=list(settings.extra_spirv_fuzz_generate_args) +
                    list(settings.common_spirv_args),
                )
            finally:
                gflogging.pop_stream_for_logging()
    except subprocess.CalledProcessError:
        util.mkdirs_p(fuzz_failures_dir)
        if len(list(
                fuzz_failures_dir.iterdir())) < settings.maximum_fuzz_failures:
            util.copy_dir(staging_dir, fuzz_failures_dir / staging_dir.name)
        return

    reference_name = reference_spirv_shader_job_orig_path.stem

    stable_shader = reference_name.startswith("stable_")

    common_spirv_args = list(settings.common_spirv_args)

    test_dirs = [
        make_test(
            template_source_dir,
            staging_dir / f"{staging_name}_no_opt_test",
            spirv_opt_args=None,
            binary_manager=binary_manager,
            derived_from=reference_name,
            stable_shader=stable_shader,
            common_spirv_args=common_spirv_args,
        ),
        make_test(
            template_source_dir,
            staging_dir / f"{staging_name}_opt_O_test",
            spirv_opt_args=["-O"],
            binary_manager=binary_manager,
            derived_from=reference_name,
            stable_shader=stable_shader,
            common_spirv_args=common_spirv_args,
        ),
        make_test(
            template_source_dir,
            staging_dir / f"{staging_name}_opt_Os_test",
            spirv_opt_args=["-Os"],
            binary_manager=binary_manager,
            derived_from=reference_name,
            stable_shader=stable_shader,
            common_spirv_args=common_spirv_args,
        ),
        make_test(
            template_source_dir,
            staging_dir / f"{staging_name}_opt_rand1_test",
            spirv_opt_args=spirv_opt_util.random_spirv_opt_args(),
            binary_manager=binary_manager,
            derived_from=reference_name,
            stable_shader=stable_shader,
            common_spirv_args=common_spirv_args,
        ),
        make_test(
            template_source_dir,
            staging_dir / f"{staging_name}_opt_rand2_test",
            spirv_opt_args=spirv_opt_util.random_spirv_opt_args(),
            binary_manager=binary_manager,
            derived_from=reference_name,
            stable_shader=stable_shader,
            common_spirv_args=common_spirv_args,
        ),
        make_test(
            template_source_dir,
            staging_dir / f"{staging_name}_opt_rand3_test",
            spirv_opt_args=spirv_opt_util.random_spirv_opt_args(),
            binary_manager=binary_manager,
            derived_from=reference_name,
            stable_shader=stable_shader,
            common_spirv_args=common_spirv_args,
        ),
    ]

    for test_dir in test_dirs:
        interrupt_util.interrupt_if_needed()
        if handle_test(test_dir, reports_dir, active_devices, binary_manager,
                       settings):
            # If we generated a report, don't bother trying other optimization combinations.
            break
Example #9
def run_spirv_reduce_or_shrink(  # pylint: disable=too-many-locals;
    source_dir: Path,
    name_of_shader_job_to_reduce: str,
    extension_to_reduce: str,
    output_dir: Path,
    preserve_semantics: bool,
    binary_manager: binaries_util.BinaryManager,
    settings: Settings,
) -> Path:
    test = test_util.metadata_read_from_source_dir(source_dir)
    input_shader_job = source_dir / name_of_shader_job_to_reduce / test_util.SHADER_JOB

    original_spirv_file = input_shader_job.with_suffix(
        extension_to_reduce + shader_job_util.SUFFIX_SPIRV_ORIG)

    transformed_spirv_file = input_shader_job.with_suffix(
        extension_to_reduce + shader_job_util.SUFFIX_SPIRV)
    transformations_file = input_shader_job.with_suffix(
        extension_to_reduce + shader_job_util.SUFFIX_TRANSFORMATIONS)

    util.mkdirs_p(output_dir)

    final_shader = output_dir / "final.spv"

    # E.g. transformation_suffix_to_reduce == ".frag.transformations"

    # E.g. ".frag.??" -> ".frag.spv"
    shader_suffix_to_override = extension_to_reduce + shader_job_util.SUFFIX_SPIRV

    if preserve_semantics:
        cmd = [
            str(binary_manager.get_binary_path_by_name("spirv-fuzz").path),
            str(original_spirv_file),
            "-o",
            str(final_shader),
            f"--shrink={str(transformations_file)}",
            f"--shrinker-temp-file-prefix={str(output_dir / 'temp_')}",
        ]
        cmd += list(settings.extra_spirv_fuzz_shrink_args)
        cmd += list(test.common_spirv_args)
        cmd += [
            # This ensures the arguments that follow are all positional arguments.
            "--",
            "gfauto_interestingness_test",
            str(source_dir),
            # --override_shader requires three parameters to follow; the third will be added by spirv-fuzz (the shader.spv file).
            "--override_shader",
            name_of_shader_job_to_reduce,
            shader_suffix_to_override,
        ]
    else:
        cmd = [
            str(binary_manager.get_binary_path_by_name("spirv-reduce").path),
            str(transformed_spirv_file),
            "-o",
            str(final_shader),
            f"--temp-file-prefix={str(output_dir / 'temp_')}",
        ]
        cmd += list(settings.extra_spirv_reduce_args)
        cmd += list(test.common_spirv_args)
        cmd += [
            # This ensures the arguments that follow are all positional arguments.
            "--",
            "gfauto_interestingness_test",
            str(source_dir),
            # --override_shader requires three parameters to follow; the third will be added by spirv-reduce (the shader.spv file).
            "--override_shader",
            name_of_shader_job_to_reduce,
            shader_suffix_to_override,
        ]

    # Log the reduction.
    with util.file_open_text(output_dir / "command.log", "w") as f:
        gflogging.push_stream_for_logging(f)
        try:
            # The reducer can fail, but it will typically output an exception file, so we can ignore the exit code.
            subprocess_util.run(cmd, verbose=True, check_exit_code=False)
        finally:
            gflogging.pop_stream_for_logging()

    return final_shader
Example #10
def main() -> None:  # pylint: disable=too-many-locals;
    parser = argparse.ArgumentParser(
        description="Generates an AmberScript test from a shader job.")

    parser.add_argument(
        "shader_job",
        help="The input .json shader job file.",
    )

    parser.add_argument(
        "--output",
        help="Output directory.",
        default="output",
    )

    parser.add_argument(
        "--spirv_opt_args",
        help=
        "Arguments for spirv-opt as a space-separated string, or an empty string to skip running spirv-opt.",
        default="",
    )

    parser.add_argument(
        "--settings",
        help=
        "Path to a settings JSON file for this instance. The file will be generated if needed. ",
        default="settings.json",
    )

    parsed_args = parser.parse_args(sys.argv[1:])

    shader_job: Path = Path(parsed_args.shader_job)
    out_dir: Path = Path(parsed_args.output)
    spirv_opt_args_str: str = parsed_args.spirv_opt_args
    settings_path: Path = Path(parsed_args.settings)

    spirv_opt_args: List[str] = []
    if spirv_opt_args_str:
        spirv_opt_args = spirv_opt_args_str.split(" ")

    settings = settings_util.read_or_create(settings_path)

    binary_manager = binaries_util.get_default_binary_manager(settings)

    staging_dir = out_dir / "staging"

    template_source_dir = staging_dir / "source_template"
    test_dir = staging_dir / "test"

    run_output_dir: Path = out_dir / "run"

    # Remove stale directories.
    if staging_dir.is_dir():
        shutil.rmtree(staging_dir)
    if run_output_dir.is_dir():
        shutil.rmtree(run_output_dir)

    # Create source template and call |make_test|.

    if shader_job_util.get_related_suffixes_that_exist(
            shader_job, language_suffix=[shader_job_util.SUFFIX_SPIRV]):
        # This is a SPIR-V shader job.

        shader_job_util.copy(
            shader_job,
            template_source_dir / test_util.VARIANT_DIR / test_util.SHADER_JOB,
            language_suffix=shader_job_util.SUFFIXES_SPIRV_FUZZ_INPUT,
        )

        fuzz_spirv_amber_test.make_test(
            template_source_dir,
            test_dir,
            spirv_opt_args=spirv_opt_args,
            binary_manager=binary_manager,
            derived_from=shader_job.stem,
            stable_shader=False,
            common_spirv_args=list(settings.common_spirv_args),
        )

    elif shader_job_util.get_related_suffixes_that_exist(
            shader_job, language_suffix=[shader_job_util.SUFFIX_GLSL]):
        # This is a GLSL shader job.

        # The "graphicsfuzz-tool" tool is designed to be on your PATH so that e.g. ".bat" will be appended on Windows.
        # So we use tool_on_path with a custom PATH to get the actual file we want to execute.
        graphicsfuzz_tool_path = util.tool_on_path(
            "graphicsfuzz-tool",
            str(
                binary_manager.get_binary_path_by_name(
                    "graphicsfuzz-tool").path.parent),
        )

        with util.file_open_text(staging_dir / "log.txt", "w") as log_file:
            try:
                gflogging.push_stream_for_logging(log_file)

                # Create the prepared (for Vulkan GLSL) reference.
                glsl_generate_util.run_prepare_reference(
                    graphicsfuzz_tool_path,
                    shader_job,
                    template_source_dir / test_util.VARIANT_DIR /
                    test_util.SHADER_JOB,
                    legacy_graphics_fuzz_vulkan_arg=settings.
                    legacy_graphics_fuzz_vulkan_arg,
                )
            finally:
                gflogging.pop_stream_for_logging()

        fuzz_glsl_amber_test.make_test(
            template_source_dir,
            test_dir,
            spirv_opt_args=spirv_opt_args,
            binary_manager=binary_manager,
            derived_from=shader_job.stem,
            stable_shader=False,
            common_spirv_args=list(settings.common_spirv_args),
        )

    else:
        raise AssertionError(
            "Unexpected shader job type; expected GLSL or SPIR-V shaders.")

    preprocessor_cache = util.CommandCache()

    fuzz_test_util.run_shader_job(
        source_dir=test_util.get_source_dir(test_dir),
        output_dir=run_output_dir,
        binary_manager=binary_manager,
        device=Device(host=DeviceHost()),
        preprocessor_cache=preprocessor_cache,
        stop_after_amber=True,
    )
Example #11
def artifact_execute_recipe(  # pylint: disable=too-many-branches;
    artifact_path: str = "",
    only_if_artifact_json_missing: bool = False,
    built_in_recipes: Optional[RecipeMap] = None,
) -> None:
    artifact_path = artifact_path_absolute(artifact_path)
    executing_lock_file_path = artifact_get_inner_file_path(
        ARTIFACT_EXECUTING_LOCK_FILE_NAME, artifact_path)

    busy_waiting = False
    first_wait = True

    # We may have to retry if another process appears to be executing this recipe.
    while True:
        if busy_waiting:
            time.sleep(BUSY_WAIT_IN_SECONDS)
            if first_wait:
                log(f"Waiting for {artifact_path} due to lock file {executing_lock_file_path}"
                    )
                first_wait = False

        if executing_lock_file_path.exists():
            # Retry.
            busy_waiting = True
            continue

        # Several processes can still execute here concurrently; the above check is just an optimization.

        # The metadata file should be written atomically once the artifact is ready for use, so if it exists, we can
        # just return.
        if (only_if_artifact_json_missing
                and artifact_get_metadata_file_path(artifact_path).exists()):
            return

        # The recipe file should be written atomically. If it exists, we are fine to continue. If not and if we have
        # the recipe in |built_in_recipes|, more than one process might write it, but the final rename from TEMP_FILE ->
        # RECIPE.json is atomic, so *some* process will succeed and the contents will be valid. Thus, we should be fine
        # to continue once we have written the recipe.
        if (not artifact_get_recipe_file_path(artifact_path).exists()
                and built_in_recipes):
            built_in_recipe = built_in_recipes[artifact_path]
            if not built_in_recipe:
                raise FileNotFoundError(
                    str(artifact_get_recipe_file_path(artifact_path)))
            # This is atomic; should not fail.
            artifact_write_recipe(built_in_recipe, artifact_path)

        recipe = artifact_read_recipe(artifact_path)

        # Create EXECUTING_LOCK file. The "x" means exclusive creation. This will fail if the file already exists;
        # i.e. another process won the race and is executing the recipe; if so, we retry from the beginning of this
        # function (and will return early). Otherwise, we can continue. We don't need to keep the file open; the file
        # is not opened with exclusive access, just created exclusively.
        try:
            with util.file_open_text(executing_lock_file_path,
                                     "x") as lock_file:
                lock_file.write("locked")
        except FileExistsError:
            # Retry.
            busy_waiting = True
            continue

        # If we fail here (e.g. KeyboardInterrupt), we won't remove the lock file. But any alternative will either have
        # the same problem (interrupts can happen at almost any time) or could end up accidentally removing the lock
        # file made by another process, so this is the safest approach. Users can manually delete lock files if needed;
        # the log output indicates the file on which we are blocked.

        try:
            with util.file_open_text(
                    artifact_get_recipe_log_file_path(artifact_path),
                    "w") as f:
                gflogging.push_stream_for_logging(f)
                try:
                    if recipe.HasField("download_and_extract_archive_set"):
                        recipe_download_and_extract_archive_set.recipe_download_and_extract_archive_set(
                            recipe.download_and_extract_archive_set,
                            artifact_path)
                    else:
                        raise NotImplementedError(
                            "Artifact {} has recipe type {} and this is not implemented"
                            .format(artifact_path,
                                    recipe.WhichOneof("recipe")))
                finally:
                    gflogging.pop_stream_for_logging()
        finally:
            # Delete the lock file when we have finished. Ignore errors.
            try:
                executing_lock_file_path.unlink()
            except OSError:
                log(f"WARNING: failed to delete: {str(executing_lock_file_path)}"
                    )
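The locking scheme in this example relies only on the atomicity of exclusive file creation ("x" mode). A minimal standalone sketch of the same idea, with illustrative names that are not part of the gfauto API:

# Sketch only: exclusive-creation lock files, as used by artifact_execute_recipe.
import time
from pathlib import Path

POLL_SECONDS = 1.0  # placeholder; gfauto uses its own BUSY_WAIT_IN_SECONDS constant


def acquire_lock_file(lock_path: Path) -> None:
    # Spin until this process manages to create the lock file exclusively.
    # open(..., "x") raises FileExistsError if another process got there first.
    while True:
        try:
            with open(lock_path, "x") as lock_file:
                lock_file.write("locked")
            return
        except FileExistsError:
            time.sleep(POLL_SECONDS)


def release_lock_file(lock_path: Path) -> None:
    # Best effort, as in artifact_execute_recipe: ignore failure to delete.
    try:
        lock_path.unlink()
    except OSError:
        pass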
Example #12
def main_helper(  # pylint: disable=too-many-locals,too-many-branches,too-many-statements;
    tests_dir: Path,
    work_dir: Path,
    binaries: binaries_util.BinaryManager,
    settings: Settings,
    active_devices: List[Device],
    results_out_handle: Optional[TextIO],
    updated_settings_output_path: Optional[Path],
) -> None:

    util.mkdirs_p(work_dir)

    def write_entry(entry: str) -> None:
        if not results_out_handle:
            return
        results_out_handle.write(entry)
        results_out_handle.write(", ")
        results_out_handle.flush()

    def write_newline() -> None:
        if not results_out_handle:
            return
        results_out_handle.write("\n")
        results_out_handle.flush()

    spirv_opt_path: Optional[Path] = None
    swift_shader_path: Optional[Path] = None
    amber_path: Optional[Path] = None

    # Small hack to ensure we have three devices for spirv-opt, each with a different name.
    main_spirv_opt_device: Optional[Device] = None

    if active_devices and active_devices[0].name == "host_preprocessor":
        main_spirv_opt_device = active_devices[0]
        main_spirv_opt_device.name = SPIRV_OPT_O

        spirv_opt_custom = Device()
        spirv_opt_custom.CopyFrom(main_spirv_opt_device)
        spirv_opt_custom.name = SPIRV_OPT_CUSTOM
        active_devices.insert(1, spirv_opt_custom)

        spirv_opt_os = Device()
        spirv_opt_os.CopyFrom(main_spirv_opt_device)
        spirv_opt_os.name = SPIRV_OPT_OS
        active_devices.insert(1, spirv_opt_os)

    # Enumerate active devices, writing their name and storing binary paths if needed.
    write_entry("test")
    for device in active_devices:
        write_entry(device.name)

        if device.HasField("preprocess"):
            spirv_opt_path = binaries.get_binary_path_by_name(
                binaries_util.SPIRV_OPT_NAME).path

        if device.HasField("swift_shader"):
            swift_shader_path = binaries.get_binary_path_by_name(
                binaries_util.SWIFT_SHADER_NAME).path

        if device.HasField("swift_shader") or device.HasField("host"):
            amber_path = binaries.get_binary_path_by_name(
                binaries_util.AMBER_NAME).path

    write_newline()

    # Enumerate tests and devices, writing the results.

    for test in sorted(tests_dir.glob("*.amber")):
        test_name = util.remove_end(test.name, ".amber")
        write_entry(test_name)
        spirv_shaders = sorted(
            tests_dir.glob(util.remove_end(test.name, "amber") + "*.spv"))
        for device in active_devices:
            test_run_dir = work_dir / f"{test_name}_{device.name}"
            util.mkdirs_p(test_run_dir)
            ignored_signatures_set: Set[str] = set(
                device.ignored_crash_signatures)

            with util.file_open_text(test_run_dir / "log.txt",
                                     "w") as log_stream:
                try:
                    gflogging.push_stream_for_logging(log_stream)
                    if device.HasField("preprocess"):
                        # This just means spirv-opt for now.

                        assert spirv_opt_path  # noqa
                        assert main_spirv_opt_device  # noqa

                        # Pick spirv-opt arguments based on device name.
                        if device.name == SPIRV_OPT_O:
                            spirv_opt_args = ["-O"]
                        elif device.name == SPIRV_OPT_OS:
                            spirv_opt_args = ["-Os"]
                        elif device.name == SPIRV_OPT_CUSTOM:
                            spirv_opt_args = (
                                spirv_opt_util.OPT_INTERESTING_SUBSET_OF_PASSES
                            )
                        else:
                            raise AssertionError(
                                f"Can't tell how to run device {device.name}; "
                                f"must be named host_preprocessor and be the first active device."
                            )

                        # Reset device and ignored_crash_signatures.
                        device = main_spirv_opt_device
                        ignored_signatures_set = set(
                            device.ignored_crash_signatures)

                        try:
                            for spirv_shader in spirv_shaders:
                                spirv_opt_util.run_spirv_opt_on_spirv_shader(
                                    spirv_shader,
                                    test_run_dir,
                                    spirv_opt_args,
                                    spirv_opt_path,
                                )
                            result_util.write_status(
                                test_run_dir,
                                fuzz.STATUS_SUCCESS,
                            )
                        except subprocess.CalledProcessError:
                            result_util.write_status(
                                test_run_dir,
                                fuzz.STATUS_TOOL_CRASH,
                            )
                        except subprocess.TimeoutExpired:
                            result_util.write_status(
                                test_run_dir,
                                fuzz.STATUS_TOOL_TIMEOUT,
                            )
                    elif device.HasField("shader_compiler"):
                        try:
                            for spirv_shader in spirv_shaders:
                                shader_compiler_util.run_shader(
                                    shader_compiler_device=device.shader_compiler,
                                    shader_path=spirv_shader,
                                    output_dir=test_run_dir,
                                    compiler_path=binaries.get_binary_path_by_name(
                                        device.shader_compiler.binary
                                    ).path,
                                    timeout=DEFAULT_TIMEOUT,
                                )
                            result_util.write_status(
                                test_run_dir,
                                fuzz.STATUS_SUCCESS,
                            )
                        except subprocess.CalledProcessError:
                            result_util.write_status(
                                test_run_dir,
                                fuzz.STATUS_CRASH,
                            )
                        except subprocess.TimeoutExpired:
                            result_util.write_status(
                                test_run_dir,
                                fuzz.STATUS_TIMEOUT,
                            )
                    elif device.HasField("swift_shader"):
                        assert swift_shader_path  # noqa
                        assert amber_path  # noqa
                        host_device_util.run_amber(
                            test,
                            test_run_dir,
                            amber_path=amber_path,
                            dump_image=False,
                            dump_buffer=False,
                            icd=swift_shader_path,
                        )
                    elif device.HasField("host"):
                        assert amber_path  # noqa
                        host_device_util.run_amber(
                            test,
                            test_run_dir,
                            amber_path=amber_path,
                            dump_image=False,
                            dump_buffer=False,
                            custom_launcher=list(device.host.custom_launcher),
                        )
                    elif device.HasField("android"):
                        android_device.run_amber_on_device(
                            test,
                            test_run_dir,
                            dump_image=False,
                            dump_buffer=False,
                            serial=device.android.serial,
                        )
                    else:
                        raise AssertionError(
                            f"Unsupported device {device.name}")

                finally:
                    gflogging.pop_stream_for_logging()

            status = result_util.get_status(test_run_dir)

            if status == fuzz.STATUS_SUCCESS:
                write_entry("P")
            elif status in (fuzz.STATUS_TIMEOUT, fuzz.STATUS_TOOL_TIMEOUT):
                write_entry("T")
            else:
                write_entry("F")

            # Update ignored signatures.
            if (status in (
                    fuzz.STATUS_TOOL_CRASH,
                    fuzz.STATUS_CRASH,
                    fuzz.STATUS_UNRESPONSIVE,
            ) and updated_settings_output_path):
                log_contents = util.file_read_text(
                    result_util.get_log_path(test_run_dir))
                signature = signature_util.get_signature_from_log_contents(
                    log_contents)
                if signature == signature_util.NO_SIGNATURE:
                    log(f"NOT updating ignored signatures to include {signature}"
                        )
                elif signature in ignored_signatures_set:
                    log(f"Signature is already ignored: {signature}")
                else:
                    log(f"Adding ignored signature: {signature}")
                    device.ignored_crash_signatures.append(signature)

        write_newline()

    if updated_settings_output_path:
        # Reset main_spirv_opt_device name before writing it back out.
        if main_spirv_opt_device:
            main_spirv_opt_device.name = "host_preprocessor"
        settings_util.write(settings, updated_settings_output_path)


def main() -> None:  # pylint: disable=too-many-locals,too-many-branches,too-many-statements;
    parser = argparse.ArgumentParser(
        description="Runs GraphicsFuzz AmberScript tests on the active devices listed in "
        "the settings.json file.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    parser.add_argument(
        "--settings",
        help="Path to the settings JSON file for this instance.",
        default=str(settings_util.DEFAULT_SETTINGS_FILE_PATH),
    )

    parser.add_argument(
        "--tests",
        help="Path to the directory of AmberScript tests with shaders extracted.",
        default="graphicsfuzz",
    )

    parser.add_argument(
        "--update_ignored_signatures",
        help="As the tests are run for each device, add any crash signatures to the device's ignored_crash_signatures "
        "property and write out the updated settings.json file.",
        action="store_true",
    )

    parser.add_argument(
        "--results_out",
        help="Output file path for the CSV results table.",
        default="results.csv",
    )

    parsed_args = parser.parse_args(sys.argv[1:])

    # Args.
    tests_dir: Path = Path(parsed_args.tests)
    settings_path: Path = Path(parsed_args.settings)
    update_ignored_signatures: bool = parsed_args.update_ignored_signatures
    results_out_path: Path = Path(parsed_args.results_out)

    # Settings and devices.
    settings = settings_util.read_or_create(settings_path)
    active_devices = devices_util.get_active_devices(settings.device_list)

    # Binaries.
    binaries = binaries_util.get_default_binary_manager(settings=settings)

    work_dir = Path() / "temp" / f"cts_run_{fuzz.get_random_name()[:8]}"

    util.mkdirs_p(work_dir)

    with util.file_open_text(results_out_path, "w") as results_handle:

        def write_entry(entry: str) -> None:
            results_handle.write(entry)
            results_handle.write(", ")
            results_handle.flush()

        def write_newline() -> None:
            results_handle.write("\n")
            results_handle.flush()

        spirv_opt_path: Optional[Path] = None
        swift_shader_path: Optional[Path] = None
        amber_path: Optional[Path] = None

        # Small hack to ensure we have three devices for spirv-opt, each with a different name.
        main_spirv_opt_device: Optional[Device] = None

        if active_devices and active_devices[0].name == "host_preprocessor":
            main_spirv_opt_device = active_devices[0]
            main_spirv_opt_device.name = SPIRV_OPT_O

            spirv_opt_custom = Device()
            spirv_opt_custom.CopyFrom(main_spirv_opt_device)
            spirv_opt_custom.name = SPIRV_OPT_CUSTOM
            active_devices.insert(1, spirv_opt_custom)

            spirv_opt_os = Device()
            spirv_opt_os.CopyFrom(main_spirv_opt_device)
            spirv_opt_os.name = SPIRV_OPT_OS
            active_devices.insert(1, spirv_opt_os)

        # Enumerate active devices, writing their name and storing binary paths if needed.
        write_entry("test")
        for device in active_devices:
            write_entry(device.name)

            if device.HasField("preprocess"):
                spirv_opt_path = binaries.get_binary_path_by_name(
                    binaries_util.SPIRV_OPT_NAME
                ).path

            if device.HasField("swift_shader"):
                swift_shader_path = binaries.get_binary_path_by_name(
                    binaries_util.SWIFT_SHADER_NAME
                ).path

            if device.HasField("swift_shader") or device.HasField("host"):
                amber_path = binaries.get_binary_path_by_name(
                    binaries_util.AMBER_NAME
                ).path

        write_newline()

        # Enumerate tests and devices, writing the results.

        for test in sorted(tests_dir.glob("*.amber")):
            test_name = util.remove_end(test.name, ".amber")
            write_entry(test_name)
            spirv_shaders = sorted(
                tests_dir.glob(util.remove_end(test.name, "amber") + "*.spv")
            )
            for device in active_devices:
                test_run_dir = work_dir / f"{test_name}_{device.name}"
                util.mkdirs_p(test_run_dir)
                ignored_signatures_set: Set[str] = set(device.ignored_crash_signatures)

                with util.file_open_text(test_run_dir / "log.txt", "w") as log_stream:
                    try:
                        gflogging.push_stream_for_logging(log_stream)
                        if device.HasField("preprocess"):
                            # This just means spirv-opt for now.

                            assert spirv_opt_path  # noqa
                            assert main_spirv_opt_device  # noqa

                            # Pick spirv-opt arguments based on device name.
                            if device.name == SPIRV_OPT_O:
                                spirv_opt_args = ["-O"]
                            elif device.name == SPIRV_OPT_OS:
                                spirv_opt_args = ["-Os"]
                            elif device.name == SPIRV_OPT_CUSTOM:
                                spirv_opt_args = (
                                    spirv_opt_util.OPT_INTERESTING_SUBSET_OF_PASSES
                                )
                            else:
                                raise AssertionError(
                                    f"Can't tell how to run device {device.name}; "
                                    f"must be named host_preprocessor and be the first active device."
                                )

                            # Reset device and ignored_crash_signatures.
                            device = main_spirv_opt_device
                            ignored_signatures_set = set(
                                device.ignored_crash_signatures
                            )

                            try:
                                for spirv_shader in spirv_shaders:
                                    spirv_opt_util.run_spirv_opt_on_spirv_shader(
                                        spirv_shader,
                                        test_run_dir,
                                        spirv_opt_args,
                                        spirv_opt_path,
                                    )
                                result_util.write_status(
                                    test_run_dir, fuzz.STATUS_SUCCESS,
                                )
                            except subprocess.CalledProcessError:
                                result_util.write_status(
                                    test_run_dir, fuzz.STATUS_TOOL_CRASH,
                                )
                            except subprocess.TimeoutExpired:
                                result_util.write_status(
                                    test_run_dir, fuzz.STATUS_TOOL_TIMEOUT,
                                )
                        elif device.HasField("shader_compiler"):
                            try:
                                for spirv_shader in spirv_shaders:
                                    shader_compiler_util.run_shader(
                                        shader_compiler_device=device.shader_compiler,
                                        shader_path=spirv_shader,
                                        output_dir=test_run_dir,
                                        compiler_path=binaries.get_binary_path_by_name(
                                            device.shader_compiler.binary
                                        ).path,
                                        timeout=DEFAULT_TIMEOUT,
                                    )
                                result_util.write_status(
                                    test_run_dir, fuzz.STATUS_SUCCESS,
                                )
                            except subprocess.CalledProcessError:
                                result_util.write_status(
                                    test_run_dir, fuzz.STATUS_CRASH,
                                )
                            except subprocess.TimeoutExpired:
                                result_util.write_status(
                                    test_run_dir, fuzz.STATUS_TIMEOUT,
                                )
                        elif device.HasField("swift_shader"):
                            assert swift_shader_path  # noqa
                            assert amber_path  # noqa
                            host_device_util.run_amber(
                                test,
                                test_run_dir,
                                amber_path=amber_path,
                                dump_image=False,
                                dump_buffer=False,
                                icd=swift_shader_path,
                            )
                        elif device.HasField("host"):
                            assert amber_path  # noqa
                            host_device_util.run_amber(
                                test,
                                test_run_dir,
                                amber_path=amber_path,
                                dump_image=False,
                                dump_buffer=False,
                                custom_launcher=list(device.host.custom_launcher),
                            )
                        elif device.HasField("android"):
                            android_device.run_amber_on_device(
                                test,
                                test_run_dir,
                                dump_image=False,
                                dump_buffer=False,
                                serial=device.android.serial,
                            )
                        else:
                            raise AssertionError(f"Unsupported device {device.name}")

                    finally:
                        gflogging.pop_stream_for_logging()

                status = result_util.get_status(test_run_dir)

                if status == fuzz.STATUS_SUCCESS:
                    write_entry("P")
                elif status in (fuzz.STATUS_TIMEOUT, fuzz.STATUS_TOOL_TIMEOUT):
                    write_entry("T")
                else:
                    write_entry("F")

                # Update ignored signatures.
                if (
                    status
                    in (
                        fuzz.STATUS_TOOL_CRASH,
                        fuzz.STATUS_CRASH,
                        fuzz.STATUS_UNRESPONSIVE,
                    )
                    and update_ignored_signatures
                ):
                    log_contents = util.file_read_text(
                        result_util.get_log_path(test_run_dir)
                    )
                    signature = signature_util.get_signature_from_log_contents(
                        log_contents
                    )
                    if signature == signature_util.NO_SIGNATURE:
                        log(f"NOT updating ignored signatures to include {signature}")
                    elif signature in ignored_signatures_set:
                        log(f"Signature is already ignored: {signature}")
                    else:
                        log(f"Adding ignored signature: {signature}")
                        device.ignored_crash_signatures.append(signature)

            write_newline()

        if update_ignored_signatures:
            # Reset main_spirv_opt_device name before writing it back out.
            if main_spirv_opt_device:
                main_spirv_opt_device.name = "host_preprocessor"
            settings_util.write(settings, settings_path)
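For reference, the write_entry and write_newline helpers above emit a simple comma-separated results table: one header row of device names, then one row per test with a P (success), T (timeout), or F (crash or other failure) cell per active device. An illustrative fragment with hypothetical test and device names:

test, spirv_opt_O, swift_shader, android_device,
stable_quicksort_no_opt_test, P, P, T,
variant_017_opt_Os_test, P, F, F,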