def run_amber(
    amber_script_file: Path,
    output_dir: Path,
    dump_image: bool,
    dump_buffer: bool,
    amber_path: Path,
    skip_render: bool = False,
    debug_layers: bool = False,
    icd: Optional[Path] = None,
) -> Path:
    with util.file_open_text(result_util.get_amber_log_path(output_dir), "w") as log_file:
        try:
            gflogging.push_stream_for_logging(log_file)
            run_amber_helper(
                amber_script_file,
                output_dir,
                dump_image,
                dump_buffer,
                amber_path,
                skip_render,
                debug_layers,
                icd,
            )
        finally:
            gflogging.pop_stream_for_logging()
    return output_dir
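# Illustrative usage sketch (not from the original source): run a single AmberScript
# test on the host through SwiftShader. The concrete paths are hypothetical; |binaries|
# is assumed to be a BinaryManager from binaries_util.get_default_binary_manager().
#
#   amber = binaries.get_binary_path_by_name(binaries_util.AMBER_NAME).path
#   swift_shader = binaries.get_binary_path_by_name(binaries_util.SWIFT_SHADER_NAME).path
#   run_amber(
#       Path("test.amber"),
#       Path("output"),
#       dump_image=True,
#       dump_buffer=False,
#       amber_path=amber,
#       icd=swift_shader,
#   )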
def run_amber_on_device(
    amber_script_file: Path,
    output_dir: Path,
    dump_image: bool,
    dump_buffer: bool,
    skip_render: bool = False,
    serial: Optional[str] = None,
) -> Path:
    # Note: use the util.file_open_text helper (as in run_amber above), not the bare name.
    with util.file_open_text(result_util.get_amber_log_path(output_dir), "w") as log_file:
        try:
            gflogging.push_stream_for_logging(log_file)
            run_amber_on_device_helper(
                amber_script_file,
                output_dir,
                dump_image,
                dump_buffer,
                skip_render,
                serial,
            )
        finally:
            gflogging.pop_stream_for_logging()
    return output_dir
def run_glsl_reduce(
    source_dir: Path,
    name_of_shader_to_reduce: str,
    output_dir: Path,
    binary_manager: binaries_util.BinaryManager,
    preserve_semantics: bool = False,
    extra_args: Optional[List[str]] = None,
) -> Path:
    input_shader_job = source_dir / name_of_shader_to_reduce / test_util.SHADER_JOB

    glsl_reduce_path = util.tool_on_path(
        "glsl-reduce",
        str(binary_manager.get_binary_path_by_name("graphicsfuzz-tool").path.parent),
    )

    cmd = [
        str(glsl_reduce_path),
        str(input_shader_job),
        "--output",
        str(output_dir),
    ]

    if preserve_semantics:
        cmd.append("--preserve-semantics")

    if extra_args:
        cmd.extend(extra_args)

    cmd.extend(
        [
            # This ensures the arguments that follow are all positional arguments.
            "--",
            "gfauto_interestingness_test",
            str(source_dir),
            # --override_shader_job requires two parameters to follow; the second will be
            # added by glsl-reduce (the shader.json file).
            "--override_shader_job",
            str(name_of_shader_to_reduce),
        ]
    )

    # Log the reduction.
    with util.file_open_text(output_dir / "command.log", "w") as f:
        gflogging.push_stream_for_logging(f)
        try:
            # The reducer can fail, but it will typically output an exception file,
            # so we can ignore the exit code.
            subprocess_util.run(cmd, verbose=True, check_exit_code=False)
        finally:
            gflogging.pop_stream_for_logging()

    return output_dir
def extract_shaders(
    amber_file: Path, output_dir: Path, binaries: binaries_util.BinaryManager
) -> List[Path]:
    files_written: List[Path] = []
    with util.file_open_text(amber_file, "r") as file_handle:
        lines = file_handle.readlines()
        if lines[0].startswith("#!amber"):
            files_written += extract_shaders_amber_script(
                amber_file, lines, output_dir, binaries
            )
        else:
            # VkScript files are not handled; skip them.
            log(f"Skipping VkScript file {str(amber_file)} for now.")

    return files_written
def process_chunk(  # pylint: disable=too-many-locals;
    chunk_num: int, chunk: Set[str], log_files: List[Path], output_file: TextIO
) -> None:

    log(f"\nChunk {chunk_num}:")
    output_file.write(f"\nChunk {chunk_num}:\n")

    unique_signatures: Set[str] = set()

    for log_file in log_files:
        with util.file_open_text(log_file, "r") as f:
            first_line = f.readline()
            match = re.fullmatch(r"Iteration seed: (\d+)\n", first_line)
            assert match  # noqa
            seed = match.group(1)
            if seed not in chunk:
                continue

            lines = f.readlines()
            start_line = 0
            end_line = 0
            found_bug = False
            for i, line in enumerate(lines):
                match = re.fullmatch(r"STATUS (\w+)\n", line)
                if not match:
                    continue
                status = match.group(1)
                if status == "SUCCESS":
                    start_line = i + 1
                    continue
                found_bug = True
                end_line = i + 1
                break

            if not found_bug:
                continue

            # |lines| already contain trailing newlines, so join with the empty string.
            failure_log = "".join(lines[start_line:end_line])

            signature = signature_util.get_signature_from_log_contents(failure_log)
            unique_signatures.add(signature)

    # Print the signatures.
    for signature in sorted(unique_signatures):
        log(signature)
        output_file.write(f"{signature}\n")
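# For reference, process_chunk expects each log file to look roughly like this
# (inferred from the regexes above; the surrounding content is illustrative):
#
#   Iteration seed: 2276561875
#   ...
#   STATUS SUCCESS
#   ...
#   STATUS TOOL_CRASH
#   ...
#
# The failure log passed to get_signature_from_log_contents is the slice of lines
# after the last "STATUS SUCCESS", up to and including the first non-SUCCESS status.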
def main() -> None:
    parser = argparse.ArgumentParser(
        description="Outputs number of lines covered from .cov files.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    parser.add_argument(
        "coverage_files",
        metavar="coverage_files",
        type=str,
        nargs="*",
        help="The .cov files to process, one after the other.",
    )

    parser.add_argument(
        "--out",
        type=str,
        help="Output results text file.",
        default="out.txt",
    )

    parsed_args = parser.parse_args(sys.argv[1:])

    input_files: List[str] = parsed_args.coverage_files
    output_file: str = parsed_args.out

    with util.file_open_text(Path(output_file), "w") as out:
        for coverage_file in input_files:
            with open(coverage_file, mode="rb") as f:
                all_line_counts: cov_util.LineCounts = pickle.load(f)

                total_num_lines = 0
                total_num_covered_lines = 0

                # |all_line_counts| maps from source file to another map. We just need the map.
                for line_counts in all_line_counts.values():
                    # |line_counts| maps from line number to execution count. We just need
                    # the execution count.
                    for execution_count in line_counts.values():
                        total_num_lines += 1
                        if execution_count > 0:
                            total_num_covered_lines += 1

                log(f"{total_num_covered_lines}, {total_num_lines}")
                out.write(f"{total_num_covered_lines}, {total_num_lines}\n")
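# Shape of the pickled LineCounts data, as implied by the loops above: a map from
# source file to {line_number: execution_count}. For example,
#   {"a.c": {10: 3, 11: 0}, "b.c": {5: 1}}
# yields "2, 3" (two covered lines out of three).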
def main() -> None:
    parser = argparse.ArgumentParser(description="Fuzz")

    parser.add_argument(
        "--settings",
        help="Path to the settings JSON file for this fuzzing instance.",
        default=str(settings_util.DEFAULT_SETTINGS_FILE_PATH),
    )

    parser.add_argument(
        "--iteration_seed",
        help="The seed to use for one fuzzing iteration (useful for reproducing an issue).",
    )

    parser.add_argument(
        "--use_spirv_fuzz",
        help="Do fuzzing using spirv-fuzz, which must be on your PATH.",
        action="store_true",
    )

    parser.add_argument(
        "--force_no_stack_traces",
        help="Continue even if we cannot get stack traces (using catchsegv or cdb).",
        action="store_true",
    )

    parsed_args = parser.parse_args(sys.argv[1:])

    settings_path = Path(parsed_args.settings)
    iteration_seed: Optional[int] = (
        None if parsed_args.iteration_seed is None else int(parsed_args.iteration_seed)
    )
    use_spirv_fuzz: bool = parsed_args.use_spirv_fuzz
    force_no_stack_traces: bool = parsed_args.force_no_stack_traces

    with util.file_open_text(Path(f"log_{get_random_name()}.txt"), "w") as log_file:
        gflogging.push_stream_for_logging(log_file)
        try:
            main_helper(
                settings_path, iteration_seed, use_spirv_fuzz, force_no_stack_traces
            )
        except settings_util.NoSettingsFile as exception:
            log(str(exception))
        finally:
            gflogging.pop_stream_for_logging()
def download_gerrit_revision(
    output_path: Path,
    change_number: str,
    revision: str,
    download_type: DownloadType,
    cookie: str,
) -> Path:
    path = f"/changes/{change_number}/revisions/{revision}/{download_type.value}"

    log(f"Downloading revision from: {path}\n to: {str(output_path)}")

    params = {"format": "tgz"} if download_type == DownloadType.Archive else {"zip": ""}

    response = gerrit_get_stream(KHRONOS_GERRIT_URL, path, params=params, cookie=cookie)

    counter = 0
    with util.file_open_binary(output_path, "wb") as output_stream:
        for chunk in response.iter_content(chunk_size=None):
            log(".", skip_newline=True)
            counter += 1
            if counter > 80:
                counter = 0
                log("")  # new line
            output_stream.write(chunk)
    log("")  # new line

    # If the cookie was bad, we may have downloaded a login page or an error message
    # instead of the archive; detect this by inspecting the start of the file.
    with util.file_open_text(output_path, "r") as input_stream:
        line = input_stream.readline(len(KHRONOS_GERRIT_LOGIN_PAGE_START) * 2)
        if line.startswith(KHRONOS_GERRIT_LOGIN_PAGE_START) or line.startswith("Not found"):
            raise BadCookieError()

    return output_path
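# Illustrative shape of the request built above, assuming hypothetical change/revision
# numbers and that DownloadType.Archive.value == "archive":
#
#   GET /changes/12345/revisions/3/archive?format=tgz
#
# which matches the standard Gerrit REST endpoint for downloading a revision archive.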
def main() -> None:
    parser = argparse.ArgumentParser(description="Processes a seed file.")

    parser.add_argument(
        "seed_file",
        help="Seed file to process.",
    )

    parser.add_argument(
        "--out",
        help="Output file.",
        default="signatures_chunked.txt",
    )

    parsed_args = parser.parse_args(sys.argv[1:])

    seed_file: Path = Path(parsed_args.seed_file)
    output_file: Path = Path(parsed_args.out)

    # Get a list of all log files.
    log_files: List[Path] = sorted(Path().glob("log_*.txt"))

    # Get chunks of seeds and call process_chunk.
    seeds: List[str] = util.file_read_text(seed_file).split()
    check(len(seeds) == 10_000, AssertionError("Expected 10,000 seeds."))

    with util.file_open_text(output_file, "w") as output:
        index = 0
        for chunk_num in range(0, 10):
            chunk: Set[str] = set()
            for _ in range(0, 1_000):
                chunk.add(seeds[index])
                index += 1
            process_chunk(chunk_num, chunk, log_files, output)

        check(
            index == 10_000,
            AssertionError("Expected to have processed 10,000 seeds."),
        )
def main() -> None:  # pylint: disable=too-many-locals,too-many-branches,too-many-statements;
    parser = argparse.ArgumentParser(
        description="Runs GraphicsFuzz AmberScript tests on the active devices listed in "
        "the settings.json file."
    )

    parser.add_argument(
        "--settings",
        help="Path to the settings JSON file for this instance.",
        default=str(settings_util.DEFAULT_SETTINGS_FILE_PATH),
    )

    parser.add_argument(
        "--tests",
        help="Path to the directory of AmberScript tests with shaders extracted.",
        default="graphicsfuzz",
    )

    parsed_args = parser.parse_args(sys.argv[1:])

    # Args.
    tests_dir: Path = Path(parsed_args.tests)
    settings_path: Path = Path(parsed_args.settings)

    # Settings and devices.
    settings = settings_util.read_or_create(settings_path)
    active_devices = devices_util.get_active_devices(settings.device_list)

    # Binaries.
    binaries = binaries_util.get_default_binary_manager(settings=settings)

    work_dir = Path() / "temp" / f"cts_run_{fuzz.get_random_name()[:8]}"
    util.mkdirs_p(work_dir)

    with util.file_open_text(Path("results.txt"), "w") as log_handle:

        def write_entry(entry: str) -> None:
            log_handle.write(entry)
            log_handle.write(", ")
            log_handle.flush()

        def write_newline() -> None:
            log_handle.write("\n")
            log_handle.flush()

        spirv_opt_path: Optional[Path] = None
        swift_shader_path: Optional[Path] = None
        amber_path: Optional[Path] = None

        # Enumerate active devices, writing their name and storing binary paths if needed.
        write_entry("test")
        for device in active_devices:
            if device.name == "host_preprocessor":
                # We are actually just running spirv-opt on the SPIR-V shaders.
                write_entry("spirv-opt")
            else:
                write_entry(device.name)

            if device.HasField("preprocess"):
                spirv_opt_path = binaries.get_binary_path_by_name(
                    binaries_util.SPIRV_OPT_NAME
                ).path

            if device.HasField("swift_shader"):
                swift_shader_path = binaries.get_binary_path_by_name(
                    binaries_util.SWIFT_SHADER_NAME
                ).path

            if device.HasField("swift_shader") or device.HasField("host"):
                amber_path = binaries.get_binary_path_by_name(
                    binaries_util.AMBER_NAME
                ).path

        write_newline()

        # Enumerate tests and devices, writing the results.
        for test in sorted(tests_dir.glob("*.amber")):
            test_name = util.remove_end(test.name, ".amber")
            write_entry(test_name)
            spirv_shaders = sorted(
                tests_dir.glob(util.remove_end(test.name, "amber") + "*.spv")
            )
            for device in active_devices:
                test_run_dir = work_dir / f"{test_name}_{device.name}"
                util.mkdirs_p(test_run_dir)
                try:
                    # Confusingly, some functions below will raise on an error; others will
                    # write e.g. CRASH to the STATUS file in the output directory. In the
                    # latter case, we update |status|. We check |status| at the end of this
                    # if-else chain and raise fake exceptions if appropriate.
                    status = fuzz.STATUS_SUCCESS
                    if device.HasField("preprocess"):
                        # This just means spirv-opt for now.
                        assert spirv_opt_path  # noqa
                        for spirv_shader in spirv_shaders:
                            spirv_opt_util.run_spirv_opt_on_spirv_shader(
                                spirv_shader, test_run_dir, ["-O"], spirv_opt_path
                            )
                    elif device.HasField("shader_compiler"):
                        for spirv_shader in spirv_shaders:
                            shader_compiler_util.run_shader(
                                shader_compiler_device=device.shader_compiler,
                                shader_path=spirv_shader,
                                output_dir=test_run_dir,
                                compiler_path=binaries.get_binary_path_by_name(
                                    device.shader_compiler.binary
                                ).path,
                                timeout=DEFAULT_TIMEOUT,
                            )
                    elif device.HasField("swift_shader"):
                        assert swift_shader_path  # noqa
                        assert amber_path  # noqa
                        host_device_util.run_amber(
                            test,
                            test_run_dir,
                            amber_path=amber_path,
                            dump_image=False,
                            dump_buffer=False,
                            icd=swift_shader_path,
                        )
                        status = result_util.get_status(test_run_dir)
                    elif device.HasField("host"):
                        assert amber_path  # noqa
                        host_device_util.run_amber(
                            test,
                            test_run_dir,
                            amber_path=amber_path,
                            dump_image=False,
                            dump_buffer=False,
                        )
                        status = result_util.get_status(test_run_dir)
                    elif device.HasField("android"):
                        android_device.run_amber_on_device(
                            test,
                            test_run_dir,
                            dump_image=False,
                            dump_buffer=False,
                            serial=device.android.serial,
                        )
                        status = result_util.get_status(test_run_dir)
                    else:
                        raise AssertionError(f"Unsupported device {device.name}")

                    if status in (fuzz.STATUS_CRASH, fuzz.STATUS_TOOL_CRASH):
                        raise CalledProcessError(1, "??")
                    if status != fuzz.STATUS_SUCCESS:
                        raise TimeoutExpired("??", fuzz.AMBER_RUN_TIME_LIMIT)

                    write_entry("P")
                except CalledProcessError:
                    write_entry("F")
                except TimeoutExpired:
                    write_entry("T")
            write_newline()
def main_helper(  # pylint: disable=too-many-locals,too-many-branches,too-many-statements;
    settings_path: Path,
    iteration_seed_override: Optional[int] = None,
    fuzzing_tool_pattern: Optional[List[FuzzingTool]] = None,
    allow_no_stack_traces: bool = False,
    override_sigint: bool = True,
    use_amber_vulkan_loader: bool = False,
    active_device_names: Optional[List[str]] = None,
    update_ignored_crash_signatures_gerrit_cookie: Optional[str] = None,
) -> None:

    if not fuzzing_tool_pattern:
        fuzzing_tool_pattern = [FuzzingTool.GLSL_FUZZ]

    util.update_gcov_environment_variable_if_needed()

    if override_sigint:
        interrupt_util.override_sigint()

    try_get_root_file()

    settings = settings_util.read_or_create(settings_path)

    binary_manager = binaries_util.get_default_binary_manager(settings=settings)

    temp_dir = Path() / "temp"

    # Note: we use "is not None" so that if the user passes an empty Gerrit cookie,
    # we still try to execute this code.
    if update_ignored_crash_signatures_gerrit_cookie is not None:
        git_tool = util.tool_on_path("git")
        downloaded_graphicsfuzz_tests_dir = (
            temp_dir / f"graphicsfuzz_cts_tests_{get_random_name()[:8]}"
        )
        work_dir = temp_dir / f"graphicsfuzz_cts_run_{get_random_name()[:8]}"
        download_cts_gf_tests.download_cts_graphicsfuzz_tests(
            git_tool=git_tool,
            cookie=update_ignored_crash_signatures_gerrit_cookie,
            output_tests_dir=downloaded_graphicsfuzz_tests_dir,
        )
        download_cts_gf_tests.extract_shaders(
            tests_dir=downloaded_graphicsfuzz_tests_dir, binaries=binary_manager
        )
        with util.file_open_text(work_dir / "results.csv", "w") as results_out_handle:
            run_cts_gf_tests.main_helper(
                tests_dir=downloaded_graphicsfuzz_tests_dir,
                work_dir=work_dir,
                binaries=binary_manager,
                settings=settings,
                active_devices=devices_util.get_active_devices(settings.device_list),
                results_out_handle=results_out_handle,
                updated_settings_output_path=settings_path,
            )
        return

    active_devices = devices_util.get_active_devices(
        settings.device_list, active_device_names=active_device_names
    )

    # Add host_preprocessor device from the device list if it is missing.
    if not active_devices[0].HasField("preprocess"):
        for device in settings.device_list.devices:
            if device.HasField("preprocess"):
                active_devices.insert(0, device)
                break

    # Add host_preprocessor device (from scratch) if it is still missing.
    if not active_devices[0].HasField("preprocess"):
        active_devices.insert(
            0, Device(name="host_preprocessor", preprocess=DevicePreprocess())
        )

    reports_dir = Path() / "reports"
    fuzz_failures_dir = reports_dir / FUZZ_FAILURES_DIR_NAME
    references_dir = Path() / REFERENCES_DIR
    donors_dir = Path() / DONORS_DIR
    spirv_fuzz_shaders_dir = Path() / "spirv_fuzz_shaders"

    # Log a warning if there is no tool on the PATH for printing stack traces.
    prepended = util.prepend_catchsegv_if_available([], log_warning=True)
    if not allow_no_stack_traces and not prepended:
        raise AssertionError("Stopping because we cannot get stack traces.")

    spirv_fuzz_shaders: List[Path] = []
    references: List[Path] = []

    if FuzzingTool.SPIRV_FUZZ in fuzzing_tool_pattern:
        check_dir_exists(spirv_fuzz_shaders_dir)
        spirv_fuzz_shaders = sorted(spirv_fuzz_shaders_dir.rglob("*.json"))

    if FuzzingTool.GLSL_FUZZ in fuzzing_tool_pattern:
        check_dir_exists(references_dir)
        check_dir_exists(donors_dir)
        # TODO: make GraphicsFuzz find donors recursively.
        references = sorted(references_dir.rglob("*.json"))
        # Filter to only include .json files that have at least one shader
        # (.frag, .vert, .comp) file.
        references = [
            ref for ref in references if shader_job_util.get_related_files(ref)
        ]

    if use_amber_vulkan_loader:
        library_path = binary_manager.get_binary_path_by_name(
            binaries_util.AMBER_VULKAN_LOADER_NAME
        ).path.parent
        util.add_library_paths_to_environ([library_path], os.environ)

    fuzzing_tool_index = 0

    while True:
        interrupt_util.interrupt_if_needed()
        # We have to use "is not None" because the seed could be 0.
        if iteration_seed_override is not None:
            iteration_seed = iteration_seed_override
        else:
            iteration_seed = secrets.randbits(ITERATION_SEED_BITS)

        log(f"Iteration seed: {iteration_seed}")
        random.seed(iteration_seed)

        staging_name = get_random_name()[:8]
        staging_dir = temp_dir / staging_name

        try:
            util.mkdir_p_new(staging_dir)
        except FileExistsError:
            if iteration_seed_override is not None:
                raise
            log(f"Staging directory already exists: {str(staging_dir)}")
            log("Starting new iteration.")
            continue

        # Pseudocode:
        #  - Create test_dir(s) in staging directory.
        #  - Run test_dir(s) on all active devices (stop early if appropriate).
        #  - For each test failure on each device, copy the test to reports_dir,
        #    adding the device and crash signature.
        #  - Reduce each report (on the given device).
        #  - Produce a summary for each report.

        fuzzing_tool = fuzzing_tool_pattern[fuzzing_tool_index]
        fuzzing_tool_index = (fuzzing_tool_index + 1) % len(fuzzing_tool_pattern)

        if fuzzing_tool == FuzzingTool.SPIRV_FUZZ:
            fuzz_spirv_test.fuzz_spirv(
                staging_dir,
                reports_dir,
                fuzz_failures_dir,
                active_devices,
                spirv_fuzz_shaders,
                settings,
                binary_manager,
            )
        elif fuzzing_tool == FuzzingTool.GLSL_FUZZ:
            fuzz_glsl_test.fuzz_glsl(
                staging_dir,
                reports_dir,
                fuzz_failures_dir,
                active_devices,
                references,
                donors_dir,
                settings,
                binary_manager,
            )
        else:
            raise AssertionError(f"Unknown fuzzing tool: {fuzzing_tool}")

        if iteration_seed_override is not None:
            log("Stopping due to iteration_seed")
            break

        shutil.rmtree(staging_dir)
def main() -> None:
    parser = argparse.ArgumentParser(
        description="Fuzz devices using glsl-fuzz and/or spirv-fuzz to generate tests. "
        "By default, repeatedly generates tests using glsl-fuzz. "
        "You can instead specify the number of times each tool will run; "
        "glsl-fuzz runs G times, then spirv-fuzz runs S times, then the pattern repeats. "
        "By default, G=0 and S=0, in which case glsl-fuzz is hardcoded to run. "
        'Each run of glsl-fuzz/spirv-fuzz uses a random "iteration seed", which can be used '
        "to replay the invocation of the tool and the steps that follow. ",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    parser.add_argument(
        "--settings",
        help="Path to the settings JSON file for this fuzzing instance.",
        default=str(settings_util.DEFAULT_SETTINGS_FILE_PATH),
    )

    parser.add_argument(
        "--iteration_seed",
        help="The seed to use for one fuzzing iteration (useful for reproducing an issue).",
    )

    parser.add_argument(
        "--glsl_fuzz_iterations",
        metavar="G",
        help="Run glsl-fuzz G times to generate some tests, before moving on to the next tool.",
        action="store",
        default=0,
        type=int,
    )

    parser.add_argument(
        "--spirv_fuzz_iterations",
        metavar="S",
        help="Run spirv-fuzz S times to generate some tests, before moving on to the next tool.",
        action="store",
        default=0,
        type=int,
    )

    parser.add_argument(
        "--allow_no_stack_traces",
        help="Continue even if we cannot get stack traces (using catchsegv or cdb).",
        action="store_true",
    )

    parser.add_argument(
        "--active_device",
        help="Add an active device name, overriding those in the settings.json file. "
        "Ignored when --update_ignored_crash_signatures is passed. "
        "Can be used multiple times to add multiple devices. "
        "E.g. --active_device host --active_device host_with_alternative_icd. "
        "This allows sharing a single settings.json file between multiple instances of "
        "gfauto_fuzz. "
        "Note that a host_preprocessor device will automatically be added as the first "
        "active device, if it is missing. ",
        action="append",
    )

    parser.add_argument(
        "--update_ignored_crash_signatures",
        metavar="GERRIT_COOKIE",
        help="When passed, gfauto will download and run the existing GraphicsFuzz "
        "AmberScript tests from Khronos vk-gl-cts on the active devices listed in the "
        "settings.json file. "
        "It will then update the ignored_crash_signatures field for each active device in "
        "the settings.json file based on the crash signatures seen. "
        "Requires Git. Requires Khronos membership. Obtain the Gerrit cookie as follows. "
        + download_cts_gf_tests.GERRIT_COOKIE_INSTRUCTIONS,
        action="store",
        default=None,
        type=str,
    )

    parsed_args = parser.parse_args(sys.argv[1:])

    settings_path = Path(parsed_args.settings)
    iteration_seed: Optional[int] = (
        None if parsed_args.iteration_seed is None else int(parsed_args.iteration_seed)
    )
    glsl_fuzz_iterations: int = parsed_args.glsl_fuzz_iterations
    spirv_fuzz_iterations: int = parsed_args.spirv_fuzz_iterations
    allow_no_stack_traces: bool = parsed_args.allow_no_stack_traces
    active_device_names: Optional[List[str]] = parsed_args.active_device
    update_ignored_crash_signatures_gerrit_cookie: Optional[str] = (
        parsed_args.update_ignored_crash_signatures
    )

    # E.g. [GLSL_FUZZ, GLSL_FUZZ, SPIRV_FUZZ] will run glsl-fuzz twice, then spirv-fuzz
    # once, then repeat.
    fuzzing_tool_pattern = get_fuzzing_tool_pattern(
        glsl_fuzz_iterations=glsl_fuzz_iterations,
        spirv_fuzz_iterations=spirv_fuzz_iterations,
    )

    with util.file_open_text(Path(f"log_{get_random_name()}.txt"), "w") as log_file:
        gflogging.push_stream_for_logging(log_file)
        try:
            main_helper(
                settings_path,
                iteration_seed,
                fuzzing_tool_pattern,
                allow_no_stack_traces,
                active_device_names=active_device_names,
                update_ignored_crash_signatures_gerrit_cookie=update_ignored_crash_signatures_gerrit_cookie,
            )
        except settings_util.NoSettingsFile as exception:
            log(str(exception))
        finally:
            gflogging.pop_stream_for_logging()
def fuzz_glsl(  # pylint: disable=too-many-locals;
    staging_dir: Path,
    reports_dir: Path,
    fuzz_failures_dir: Path,
    active_devices: List[Device],
    references: List[Path],
    donors_dir: Path,
    settings: Settings,
    binary_manager: binaries_util.BinaryManager,
) -> None:
    staging_name = staging_dir.name
    template_source_dir = staging_dir / "source_template"

    # Pick a randomly chosen reference.
    unprepared_reference_shader_job: Path = random.choice(references)

    # The "graphicsfuzz-tool" tool is designed to be on your PATH so that e.g. ".bat" will
    # be appended on Windows. So we use tool_on_path with a custom PATH to get the actual
    # file we want to execute.
    graphicsfuzz_tool_path = util.tool_on_path(
        "graphicsfuzz-tool",
        str(binary_manager.get_binary_path_by_name("graphicsfuzz-tool").path.parent),
    )

    try:
        with util.file_open_text(staging_dir / "log.txt", "w") as log_file:
            try:
                gflogging.push_stream_for_logging(log_file)

                # Create the prepared (for Vulkan GLSL) reference.
                glsl_generate_util.run_prepare_reference(
                    graphicsfuzz_tool_path,
                    unprepared_reference_shader_job,
                    template_source_dir / test_util.REFERENCE_DIR / test_util.SHADER_JOB,
                    legacy_graphics_fuzz_vulkan_arg=settings.legacy_graphics_fuzz_vulkan_arg,
                )

                # Generate the variant (GraphicsFuzz requires the unprepared reference as
                # input).
                glsl_generate_util.run_generate(
                    graphicsfuzz_tool_path,
                    unprepared_reference_shader_job,
                    donors_dir,
                    template_source_dir / test_util.VARIANT_DIR / test_util.SHADER_JOB,
                    seed=str(random.getrandbits(glsl_generate_util.GENERATE_SEED_BITS)),
                    other_args=list(settings.extra_graphics_fuzz_generate_args)
                    if settings.extra_graphics_fuzz_generate_args
                    else None,
                    legacy_graphics_fuzz_vulkan_arg=settings.legacy_graphics_fuzz_vulkan_arg,
                )
            finally:
                gflogging.pop_stream_for_logging()
    except subprocess.CalledProcessError:
        util.mkdirs_p(fuzz_failures_dir)
        if len(list(fuzz_failures_dir.iterdir())) < settings.maximum_fuzz_failures:
            util.copy_dir(staging_dir, fuzz_failures_dir / staging_dir.name)
        return

    reference_name = unprepared_reference_shader_job.stem
    stable_shader = reference_name.startswith("stable_")
    common_spirv_args = list(settings.common_spirv_args)

    test_dirs = [
        make_test(
            template_source_dir,
            staging_dir / f"{staging_name}_no_opt_test",
            spirv_opt_args=None,
            binary_manager=binary_manager,
            derived_from=reference_name,
            stable_shader=stable_shader,
            common_spirv_args=common_spirv_args,
        ),
        make_test(
            template_source_dir,
            staging_dir / f"{staging_name}_opt_O_test",
            spirv_opt_args=["-O"],
            binary_manager=binary_manager,
            derived_from=reference_name,
            stable_shader=stable_shader,
            common_spirv_args=common_spirv_args,
        ),
    ]

    if not settings.spirv_opt_just_o:
        test_dirs += [
            make_test(
                template_source_dir,
                staging_dir / f"{staging_name}_opt_Os_test",
                spirv_opt_args=["-Os"],
                binary_manager=binary_manager,
                derived_from=reference_name,
                stable_shader=stable_shader,
                common_spirv_args=common_spirv_args,
            ),
            make_test(
                template_source_dir,
                staging_dir / f"{staging_name}_opt_rand1_test",
                spirv_opt_args=spirv_opt_util.random_spirv_opt_args(),
                binary_manager=binary_manager,
                derived_from=reference_name,
                stable_shader=stable_shader,
                common_spirv_args=common_spirv_args,
            ),
            make_test(
                template_source_dir,
                staging_dir / f"{staging_name}_opt_rand2_test",
                spirv_opt_args=spirv_opt_util.random_spirv_opt_args(),
                binary_manager=binary_manager,
                derived_from=reference_name,
                stable_shader=stable_shader,
                common_spirv_args=common_spirv_args,
            ),
            make_test(
                template_source_dir,
                staging_dir / f"{staging_name}_opt_rand3_test",
                spirv_opt_args=spirv_opt_util.random_spirv_opt_args(),
                binary_manager=binary_manager,
                derived_from=reference_name,
                stable_shader=stable_shader,
                common_spirv_args=common_spirv_args,
            ),
        ]

    for test_dir in test_dirs:
        interrupt_util.interrupt_if_needed()
        if handle_test(test_dir, reports_dir, active_devices, binary_manager, settings):
            # If we generated a report, don't bother trying other optimization combinations.
            break
def run_shader_job(  # pylint: disable=too-many-return-statements,too-many-branches,too-many-locals,too-many-statements;
    source_dir: Path,
    output_dir: Path,
    binary_manager: binaries_util.BinaryManager,
    test: Optional[Test] = None,
    device: Optional[Device] = None,
    ignore_test_and_device_binaries: bool = False,
    shader_job_overrides: Iterable[tool.NameAndShaderJob] = (),
    shader_job_shader_overrides: Optional[tool.ShaderJobNameToShaderOverridesMap] = None,
) -> Path:

    if not shader_job_shader_overrides:
        shader_job_shader_overrides = {}

    with util.file_open_text(output_dir / "log.txt", "w") as log_file:
        try:
            gflogging.push_stream_for_logging(log_file)

            # TODO: Find amber path. NDK or host.

            # TODO: If Amber is going to be used, check if Amber can use Vulkan debug
            #  layers now, and if not, pass that info down via a bool.

            if not test:
                test = test_util.metadata_read_from_path(
                    source_dir / test_util.TEST_METADATA
                )

            if not device:
                device = test.device

            log(f"Running test on device:\n{device.name}")

            # We will create a binary_manager child with a restricted set of binaries so
            # that we only use the binaries specified in the test and by the device; if
            # some required binaries are not specified by the test nor the device, there
            # will be an error instead of falling back to our default binaries. But we keep
            # a reference to the parent so we can still access certain "test-independent"
            # binaries like Amber.

            binary_manager_parent = binary_manager

            if not ignore_test_and_device_binaries:
                binary_manager = binary_manager.get_child_binary_manager(
                    list(device.binaries) + list(test.binaries)
                )

            spirv_opt_hash: Optional[str] = None
            spirv_opt_args: Optional[List[str]] = None
            if test.glsl.spirv_opt_args or test.spirv_fuzz.spirv_opt_args:
                spirv_opt_hash = binary_manager.get_binary_by_name(
                    binaries_util.SPIRV_OPT_NAME
                ).version
                spirv_opt_args = (
                    list(test.glsl.spirv_opt_args)
                    if test.glsl.spirv_opt_args
                    else list(test.spirv_fuzz.spirv_opt_args)
                )

            shader_jobs = tool.get_shader_jobs(source_dir, overrides=shader_job_overrides)

            combined_spirv_shader_jobs: List[tool.SpirvCombinedShaderJob] = []

            for shader_job in shader_jobs:
                try:
                    shader_overrides = shader_job_shader_overrides.get(
                        shader_job.name, None
                    )
                    combined_spirv_shader_jobs.append(
                        tool.compile_shader_job(
                            name=shader_job.name,
                            input_json=shader_job.shader_job,
                            work_dir=output_dir / shader_job.name,
                            binary_paths=binary_manager,
                            spirv_opt_args=spirv_opt_args,
                            shader_overrides=shader_overrides,
                        )
                    )
                except subprocess.CalledProcessError:
                    result_util.write_status(
                        output_dir, fuzz.STATUS_TOOL_CRASH, shader_job.name
                    )
                    return output_dir
                except subprocess.TimeoutExpired:
                    result_util.write_status(
                        output_dir, fuzz.STATUS_TOOL_TIMEOUT, shader_job.name
                    )
                    return output_dir

            # Device types: |preprocess| and |shader_compiler| don't need an AmberScript
            # file.

            # noinspection PyTypeChecker
            if device.HasField("preprocess"):
                # The "preprocess" device type just needs to get this far, so this is a
                # success.
                result_util.write_status(output_dir, fuzz.STATUS_SUCCESS)
                return output_dir

            # noinspection PyTypeChecker
            if device.HasField("shader_compiler"):
                for combined_spirv_shader_job in combined_spirv_shader_jobs:
                    try:
                        shader_compiler_util.run_shader_job(
                            device.shader_compiler,
                            combined_spirv_shader_job.spirv_shader_job,
                            output_dir,
                            binary_manager=binary_manager,
                        )
                    except subprocess.CalledProcessError:
                        result_util.write_status(
                            output_dir, fuzz.STATUS_CRASH, combined_spirv_shader_job.name
                        )
                        return output_dir
                    except subprocess.TimeoutExpired:
                        result_util.write_status(
                            output_dir, fuzz.STATUS_TIMEOUT, combined_spirv_shader_job.name
                        )
                        return output_dir

                # The shader compiler succeeded on all files; this is a success.
                result_util.write_status(output_dir, fuzz.STATUS_SUCCESS)
                return output_dir

            # Other device types need an AmberScript file.

            amber_converter_shader_job_files = [
                amber_converter.ShaderJobFile(
                    name_prefix=combined_spirv_shader_job.name,
                    asm_spirv_shader_job_json=combined_spirv_shader_job.spirv_asm_shader_job,
                    glsl_source_json=combined_spirv_shader_job.glsl_source_shader_job,
                    processing_info="",
                )
                for combined_spirv_shader_job in combined_spirv_shader_jobs
            ]

            # Check if the first is the reference shader; if so, pull it out into its own
            # variable.

            reference: Optional[amber_converter.ShaderJobFile] = None
            variants = amber_converter_shader_job_files

            if amber_converter_shader_job_files[0].name_prefix == test_util.REFERENCE_DIR:
                reference = amber_converter_shader_job_files[0]
                variants = variants[1:]
            elif len(variants) > 1:
                raise AssertionError(
                    "More than one variant, but no reference. This is unexpected."
                )

            amber_script_file = amber_converter.spirv_asm_shader_job_to_amber_script(
                shader_job_file_amber_test=amber_converter.ShaderJobFileBasedAmberTest(
                    reference_asm_spirv_job=reference, variants_asm_spirv_job=variants
                ),
                output_amber_script_file_path=output_dir / "test.amber",
                amberfy_settings=amber_converter.AmberfySettings(
                    spirv_opt_args=spirv_opt_args, spirv_opt_hash=spirv_opt_hash
                ),
            )

            is_compute = bool(
                shader_job_util.get_related_files(
                    combined_spirv_shader_jobs[0].spirv_shader_job,
                    [shader_job_util.EXT_COMP],
                )
            )

            # noinspection PyTypeChecker
            if device.HasField("host") or device.HasField("swift_shader"):
                icd: Optional[Path] = None

                # noinspection PyTypeChecker
                if device.HasField("swift_shader"):
                    icd = binary_manager.get_binary_path_by_name(
                        binaries_util.SWIFT_SHADER_NAME
                    ).path

                # Run the test on the host using Amber.
                host_device_util.run_amber(
                    amber_script_file,
                    output_dir,
                    amber_path=binary_manager_parent.get_binary_path_by_name(
                        binaries_util.AMBER_NAME
                    ).path,
                    dump_image=(not is_compute),
                    dump_buffer=is_compute,
                    icd=icd,
                )
                return output_dir

            # noinspection PyTypeChecker
            if device.HasField("android"):
                android_device.run_amber_on_device(
                    amber_script_file,
                    output_dir,
                    dump_image=(not is_compute),
                    dump_buffer=is_compute,
                    serial=device.android.serial,
                )
                return output_dir

            # TODO: For a remote device (which we will probably need to support), use
            #  log_a_file to output the "amber_log.txt" file.

            raise AssertionError(f"Unhandled device type:\n{str(device)}")

        finally:
            gflogging.pop_stream_for_logging()
def fuzz_spirv(  # pylint: disable=too-many-locals;
    staging_dir: Path,
    reports_dir: Path,
    fuzz_failures_dir: Path,
    active_devices: List[Device],
    spirv_fuzz_shaders: List[Path],
    settings: Settings,
    binary_manager: binaries_util.BinaryManager,
) -> None:
    staging_name = staging_dir.name
    template_source_dir = staging_dir / "source_template"

    reference_spirv_shader_job_orig_path: Path = random.choice(spirv_fuzz_shaders)

    # Copy in a randomly chosen reference.
    reference_spirv_shader_job = shader_job_util.copy(
        reference_spirv_shader_job_orig_path,
        template_source_dir / test_util.REFERENCE_DIR / test_util.SHADER_JOB,
        language_suffix=shader_job_util.SUFFIXES_SPIRV_FUZZ_INPUT,
    )

    try:
        with util.file_open_text(staging_dir / "log.txt", "w") as log_file:
            try:
                gflogging.push_stream_for_logging(log_file)
                spirv_fuzz_util.run_generate_on_shader_job(
                    binary_manager.get_binary_path_by_name("spirv-fuzz").path,
                    reference_spirv_shader_job,
                    template_source_dir / test_util.VARIANT_DIR / test_util.SHADER_JOB,
                    donor_shader_job_paths=spirv_fuzz_shaders,
                    seed=str(random.getrandbits(spirv_fuzz_util.GENERATE_SEED_BITS)),
                    other_args=list(settings.extra_spirv_fuzz_generate_args)
                    + list(settings.common_spirv_args),
                )
            finally:
                gflogging.pop_stream_for_logging()
    except subprocess.CalledProcessError:
        util.mkdirs_p(fuzz_failures_dir)
        if len(list(fuzz_failures_dir.iterdir())) < settings.maximum_fuzz_failures:
            util.copy_dir(staging_dir, fuzz_failures_dir / staging_dir.name)
        return

    reference_name = reference_spirv_shader_job_orig_path.stem
    stable_shader = reference_name.startswith("stable_")
    common_spirv_args = list(settings.common_spirv_args)

    test_dirs = [
        make_test(
            template_source_dir,
            staging_dir / f"{staging_name}_no_opt_test",
            spirv_opt_args=None,
            binary_manager=binary_manager,
            derived_from=reference_name,
            stable_shader=stable_shader,
            common_spirv_args=common_spirv_args,
        ),
        make_test(
            template_source_dir,
            staging_dir / f"{staging_name}_opt_O_test",
            spirv_opt_args=["-O"],
            binary_manager=binary_manager,
            derived_from=reference_name,
            stable_shader=stable_shader,
            common_spirv_args=common_spirv_args,
        ),
        make_test(
            template_source_dir,
            staging_dir / f"{staging_name}_opt_Os_test",
            spirv_opt_args=["-Os"],
            binary_manager=binary_manager,
            derived_from=reference_name,
            stable_shader=stable_shader,
            common_spirv_args=common_spirv_args,
        ),
        make_test(
            template_source_dir,
            staging_dir / f"{staging_name}_opt_rand1_test",
            spirv_opt_args=spirv_opt_util.random_spirv_opt_args(),
            binary_manager=binary_manager,
            derived_from=reference_name,
            stable_shader=stable_shader,
            common_spirv_args=common_spirv_args,
        ),
        make_test(
            template_source_dir,
            staging_dir / f"{staging_name}_opt_rand2_test",
            spirv_opt_args=spirv_opt_util.random_spirv_opt_args(),
            binary_manager=binary_manager,
            derived_from=reference_name,
            stable_shader=stable_shader,
            common_spirv_args=common_spirv_args,
        ),
        make_test(
            template_source_dir,
            staging_dir / f"{staging_name}_opt_rand3_test",
            spirv_opt_args=spirv_opt_util.random_spirv_opt_args(),
            binary_manager=binary_manager,
            derived_from=reference_name,
            stable_shader=stable_shader,
            common_spirv_args=common_spirv_args,
        ),
    ]

    for test_dir in test_dirs:
        interrupt_util.interrupt_if_needed()
        if handle_test(test_dir, reports_dir, active_devices, binary_manager, settings):
            # If we generated a report, don't bother trying other optimization combinations.
            break
def run_spirv_reduce_or_shrink(  # pylint: disable=too-many-locals;
    source_dir: Path,
    name_of_shader_job_to_reduce: str,
    extension_to_reduce: str,
    output_dir: Path,
    preserve_semantics: bool,
    binary_manager: binaries_util.BinaryManager,
    settings: Settings,
) -> Path:
    test = test_util.metadata_read_from_source_dir(source_dir)
    input_shader_job = source_dir / name_of_shader_job_to_reduce / test_util.SHADER_JOB

    original_spirv_file = input_shader_job.with_suffix(
        extension_to_reduce + shader_job_util.SUFFIX_SPIRV_ORIG
    )

    transformed_spirv_file = input_shader_job.with_suffix(
        extension_to_reduce + shader_job_util.SUFFIX_SPIRV
    )

    transformations_file = input_shader_job.with_suffix(
        extension_to_reduce + shader_job_util.SUFFIX_TRANSFORMATIONS
    )

    util.mkdirs_p(output_dir)

    final_shader = output_dir / "final.spv"

    # E.g. transformation_suffix_to_reduce == ".frag.transformations"
    # E.g. ".frag.??" -> ".frag.spv"
    shader_suffix_to_override = extension_to_reduce + shader_job_util.SUFFIX_SPIRV

    if preserve_semantics:
        cmd = [
            str(binary_manager.get_binary_path_by_name("spirv-fuzz").path),
            str(original_spirv_file),
            "-o",
            str(final_shader),
            f"--shrink={str(transformations_file)}",
            f"--shrinker-temp-file-prefix={str(output_dir / 'temp_')}",
        ]
        cmd += list(settings.extra_spirv_fuzz_shrink_args)
        cmd += list(test.common_spirv_args)
        cmd += [
            # This ensures the arguments that follow are all positional arguments.
            "--",
            "gfauto_interestingness_test",
            str(source_dir),
            # --override_shader requires three parameters to follow; the third will be
            # added by spirv-fuzz (the shader.spv file).
            "--override_shader",
            name_of_shader_job_to_reduce,
            shader_suffix_to_override,
        ]
    else:
        cmd = [
            str(binary_manager.get_binary_path_by_name("spirv-reduce").path),
            str(transformed_spirv_file),
            "-o",
            str(final_shader),
            f"--temp-file-prefix={str(output_dir / 'temp_')}",
        ]
        cmd += list(settings.extra_spirv_reduce_args)
        cmd += list(test.common_spirv_args)
        cmd += [
            # This ensures the arguments that follow are all positional arguments.
            "--",
            "gfauto_interestingness_test",
            str(source_dir),
            # --override_shader requires three parameters to follow; the third will be
            # added by spirv-reduce (the shader.spv file).
            "--override_shader",
            name_of_shader_job_to_reduce,
            shader_suffix_to_override,
        ]

    # Log the reduction.
    with util.file_open_text(output_dir / "command.log", "w") as f:
        gflogging.push_stream_for_logging(f)
        try:
            # The reducer can fail, but it will typically output an exception file, so we
            # can ignore the exit code.
            subprocess_util.run(cmd, verbose=True, check_exit_code=False)
        finally:
            gflogging.pop_stream_for_logging()

    return final_shader
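# Illustrative shape of the spirv-fuzz shrink command built above when
# preserve_semantics=True (paths shortened; the exact file suffixes depend on the
# shader_job_util constants):
#
#   spirv-fuzz <shader>.spv_orig -o final.spv \
#       --shrink=<shader>.transformations \
#       --shrinker-temp-file-prefix=<output_dir>/temp_ \
#       -- gfauto_interestingness_test <source_dir> \
#       --override_shader <name_of_shader_job_to_reduce> <shader_suffix_to_override>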
def main() -> None:  # pylint: disable=too-many-locals;
    parser = argparse.ArgumentParser(
        description="Generates an AmberScript test from a shader job."
    )

    parser.add_argument(
        "shader_job",
        help="The input .json shader job file.",
    )

    parser.add_argument(
        "--output",
        help="Output directory.",
        default="output",
    )

    parser.add_argument(
        "--spirv_opt_args",
        help="Arguments for spirv-opt as a space-separated string, or an empty string to "
        "skip running spirv-opt.",
        default="",
    )

    parser.add_argument(
        "--settings",
        help="Path to a settings JSON file for this instance. "
        "The file will be generated if needed. ",
        default="settings.json",
    )

    parsed_args = parser.parse_args(sys.argv[1:])

    shader_job: Path = Path(parsed_args.shader_job)
    out_dir: Path = Path(parsed_args.output)
    spirv_opt_args_str: str = parsed_args.spirv_opt_args
    settings_path: Path = Path(parsed_args.settings)

    spirv_opt_args: List[str] = []
    if spirv_opt_args_str:
        spirv_opt_args = spirv_opt_args_str.split(" ")

    settings = settings_util.read_or_create(settings_path)

    binary_manager = binaries_util.get_default_binary_manager(settings)

    staging_dir = out_dir / "staging"
    template_source_dir = staging_dir / "source_template"
    test_dir = staging_dir / "test"

    run_output_dir: Path = out_dir / "run"

    # Remove stale directories.
    if staging_dir.is_dir():
        shutil.rmtree(staging_dir)
    if run_output_dir.is_dir():
        shutil.rmtree(run_output_dir)

    # Create the source template and call |make_test|.

    if shader_job_util.get_related_suffixes_that_exist(
        shader_job, language_suffix=[shader_job_util.SUFFIX_SPIRV]
    ):
        # This is a SPIR-V shader job.

        shader_job_util.copy(
            shader_job,
            template_source_dir / test_util.VARIANT_DIR / test_util.SHADER_JOB,
            language_suffix=shader_job_util.SUFFIXES_SPIRV_FUZZ_INPUT,
        )

        fuzz_spirv_amber_test.make_test(
            template_source_dir,
            test_dir,
            spirv_opt_args=spirv_opt_args,
            binary_manager=binary_manager,
            derived_from=shader_job.stem,
            stable_shader=False,
            common_spirv_args=list(settings.common_spirv_args),
        )
    elif shader_job_util.get_related_suffixes_that_exist(
        shader_job, language_suffix=[shader_job_util.SUFFIX_GLSL]
    ):
        # This is a GLSL shader job.

        # The "graphicsfuzz-tool" tool is designed to be on your PATH so that e.g. ".bat"
        # will be appended on Windows. So we use tool_on_path with a custom PATH to get
        # the actual file we want to execute.
        graphicsfuzz_tool_path = util.tool_on_path(
            "graphicsfuzz-tool",
            str(binary_manager.get_binary_path_by_name("graphicsfuzz-tool").path.parent),
        )

        with util.file_open_text(staging_dir / "log.txt", "w") as log_file:
            try:
                gflogging.push_stream_for_logging(log_file)

                # Create the prepared (for Vulkan GLSL) reference.
                glsl_generate_util.run_prepare_reference(
                    graphicsfuzz_tool_path,
                    shader_job,
                    template_source_dir / test_util.VARIANT_DIR / test_util.SHADER_JOB,
                    legacy_graphics_fuzz_vulkan_arg=settings.legacy_graphics_fuzz_vulkan_arg,
                )
            finally:
                gflogging.pop_stream_for_logging()

        fuzz_glsl_amber_test.make_test(
            template_source_dir,
            test_dir,
            spirv_opt_args=spirv_opt_args,
            binary_manager=binary_manager,
            derived_from=shader_job.stem,
            stable_shader=False,
            common_spirv_args=list(settings.common_spirv_args),
        )
    else:
        raise AssertionError("Unexpected shader job type; expected GLSL or SPIR-V shaders.")

    preprocessor_cache = util.CommandCache()

    fuzz_test_util.run_shader_job(
        source_dir=test_util.get_source_dir(test_dir),
        output_dir=run_output_dir,
        binary_manager=binary_manager,
        device=Device(host=DeviceHost()),
        preprocessor_cache=preprocessor_cache,
        stop_after_amber=True,
    )
def main() -> None:  # pylint: disable=too-many-locals,too-many-branches,too-many-statements;
    parser = argparse.ArgumentParser(
        description="Runs GraphicsFuzz AmberScript tests on the active devices listed in "
        "the settings.json file.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    parser.add_argument(
        "--settings",
        help="Path to the settings JSON file for this instance.",
        default=str(settings_util.DEFAULT_SETTINGS_FILE_PATH),
    )

    parser.add_argument(
        "--tests",
        help="Path to the directory of AmberScript tests with shaders extracted.",
        default="graphicsfuzz",
    )

    parser.add_argument(
        "--update_ignored_signatures",
        help="As the tests are run for each device, add any crash signatures to the "
        "device's ignored_crash_signatures property and write out the updated "
        "settings.json file.",
        action="store_true",
    )

    parser.add_argument(
        "--results_out",
        help="Output file path for the CSV results table.",
        default="results.csv",
    )

    parsed_args = parser.parse_args(sys.argv[1:])

    # Args.
    tests_dir: Path = Path(parsed_args.tests)
    settings_path: Path = Path(parsed_args.settings)
    update_ignored_signatures: bool = parsed_args.update_ignored_signatures
    results_out_path: Path = Path(parsed_args.results_out)

    # Settings and devices.
    settings = settings_util.read_or_create(settings_path)
    active_devices = devices_util.get_active_devices(settings.device_list)

    # Binaries.
    binaries = binaries_util.get_default_binary_manager(settings=settings)

    work_dir = Path() / "temp" / f"cts_run_{fuzz.get_random_name()[:8]}"
    util.mkdirs_p(work_dir)

    with util.file_open_text(results_out_path, "w") as results_handle:

        def write_entry(entry: str) -> None:
            results_handle.write(entry)
            results_handle.write(", ")
            results_handle.flush()

        def write_newline() -> None:
            results_handle.write("\n")
            results_handle.flush()

        spirv_opt_path: Optional[Path] = None
        swift_shader_path: Optional[Path] = None
        amber_path: Optional[Path] = None

        # Small hack to ensure we have three devices for spirv-opt, each with a different
        # name.
        main_spirv_opt_device: Optional[Device] = None
        if active_devices and active_devices[0].name == "host_preprocessor":
            main_spirv_opt_device = active_devices[0]
            main_spirv_opt_device.name = SPIRV_OPT_O

            spirv_opt_custom = Device()
            spirv_opt_custom.CopyFrom(main_spirv_opt_device)
            spirv_opt_custom.name = SPIRV_OPT_CUSTOM
            active_devices.insert(1, spirv_opt_custom)

            spirv_opt_os = Device()
            spirv_opt_os.CopyFrom(main_spirv_opt_device)
            spirv_opt_os.name = SPIRV_OPT_OS
            active_devices.insert(1, spirv_opt_os)

        # Enumerate active devices, writing their name and storing binary paths if needed.
        write_entry("test")
        for device in active_devices:
            write_entry(device.name)

            if device.HasField("preprocess"):
                spirv_opt_path = binaries.get_binary_path_by_name(
                    binaries_util.SPIRV_OPT_NAME
                ).path

            if device.HasField("swift_shader"):
                swift_shader_path = binaries.get_binary_path_by_name(
                    binaries_util.SWIFT_SHADER_NAME
                ).path

            if device.HasField("swift_shader") or device.HasField("host"):
                amber_path = binaries.get_binary_path_by_name(
                    binaries_util.AMBER_NAME
                ).path

        write_newline()

        # Enumerate tests and devices, writing the results.
        for test in sorted(tests_dir.glob("*.amber")):
            test_name = util.remove_end(test.name, ".amber")
            write_entry(test_name)
            spirv_shaders = sorted(
                tests_dir.glob(util.remove_end(test.name, "amber") + "*.spv")
            )
            for device in active_devices:
                test_run_dir = work_dir / f"{test_name}_{device.name}"
                util.mkdirs_p(test_run_dir)

                ignored_signatures_set: Set[str] = set(device.ignored_crash_signatures)

                with util.file_open_text(test_run_dir / "log.txt", "w") as log_stream:
                    try:
                        gflogging.push_stream_for_logging(log_stream)
                        if device.HasField("preprocess"):
                            # This just means spirv-opt for now.
                            assert spirv_opt_path  # noqa
                            assert main_spirv_opt_device  # noqa

                            # Pick spirv-opt arguments based on the device name.
                            if device.name == SPIRV_OPT_O:
                                spirv_opt_args = ["-O"]
                            elif device.name == SPIRV_OPT_OS:
                                spirv_opt_args = ["-Os"]
                            elif device.name == SPIRV_OPT_CUSTOM:
                                spirv_opt_args = (
                                    spirv_opt_util.OPT_INTERESTING_SUBSET_OF_PASSES
                                )
                            else:
                                raise AssertionError(
                                    f"Can't tell how to run device {device.name}; "
                                    f"must be named host_preprocessor and be the first "
                                    f"active device."
                                )

                            # Reset device and ignored_crash_signatures.
                            device = main_spirv_opt_device
                            ignored_signatures_set = set(device.ignored_crash_signatures)

                            try:
                                for spirv_shader in spirv_shaders:
                                    spirv_opt_util.run_spirv_opt_on_spirv_shader(
                                        spirv_shader,
                                        test_run_dir,
                                        spirv_opt_args,
                                        spirv_opt_path,
                                    )
                                result_util.write_status(
                                    test_run_dir, fuzz.STATUS_SUCCESS
                                )
                            except subprocess.CalledProcessError:
                                result_util.write_status(
                                    test_run_dir, fuzz.STATUS_TOOL_CRASH
                                )
                            except subprocess.TimeoutExpired:
                                result_util.write_status(
                                    test_run_dir, fuzz.STATUS_TOOL_TIMEOUT
                                )
                        elif device.HasField("shader_compiler"):
                            try:
                                for spirv_shader in spirv_shaders:
                                    shader_compiler_util.run_shader(
                                        shader_compiler_device=device.shader_compiler,
                                        shader_path=spirv_shader,
                                        output_dir=test_run_dir,
                                        compiler_path=binaries.get_binary_path_by_name(
                                            device.shader_compiler.binary
                                        ).path,
                                        timeout=DEFAULT_TIMEOUT,
                                    )
                                result_util.write_status(
                                    test_run_dir, fuzz.STATUS_SUCCESS
                                )
                            except subprocess.CalledProcessError:
                                result_util.write_status(test_run_dir, fuzz.STATUS_CRASH)
                            except subprocess.TimeoutExpired:
                                result_util.write_status(
                                    test_run_dir, fuzz.STATUS_TIMEOUT
                                )
                        elif device.HasField("swift_shader"):
                            assert swift_shader_path  # noqa
                            assert amber_path  # noqa
                            host_device_util.run_amber(
                                test,
                                test_run_dir,
                                amber_path=amber_path,
                                dump_image=False,
                                dump_buffer=False,
                                icd=swift_shader_path,
                            )
                        elif device.HasField("host"):
                            assert amber_path  # noqa
                            host_device_util.run_amber(
                                test,
                                test_run_dir,
                                amber_path=amber_path,
                                dump_image=False,
                                dump_buffer=False,
                                custom_launcher=list(device.host.custom_launcher),
                            )
                        elif device.HasField("android"):
                            android_device.run_amber_on_device(
                                test,
                                test_run_dir,
                                dump_image=False,
                                dump_buffer=False,
                                serial=device.android.serial,
                            )
                        else:
                            raise AssertionError(f"Unsupported device {device.name}")
                    finally:
                        gflogging.pop_stream_for_logging()

                status = result_util.get_status(test_run_dir)
                if status == fuzz.STATUS_SUCCESS:
                    write_entry("P")
                elif status in (fuzz.STATUS_TIMEOUT, fuzz.STATUS_TOOL_TIMEOUT):
                    write_entry("T")
                else:
                    write_entry("F")

                # Update ignored signatures.
                if (
                    status
                    in (
                        fuzz.STATUS_TOOL_CRASH,
                        fuzz.STATUS_CRASH,
                        fuzz.STATUS_UNRESPONSIVE,
                    )
                    and update_ignored_signatures
                ):
                    log_contents = util.file_read_text(
                        result_util.get_log_path(test_run_dir)
                    )
                    signature = signature_util.get_signature_from_log_contents(
                        log_contents
                    )
                    if signature == signature_util.NO_SIGNATURE:
                        log(f"NOT updating ignored signatures to include {signature}")
                    elif signature in ignored_signatures_set:
                        log(f"Signature is already ignored: {signature}")
                    else:
                        log(f"Adding ignored signature: {signature}")
                        device.ignored_crash_signatures.append(signature)

            write_newline()

    if update_ignored_signatures:
        # Reset main_spirv_opt_device name before writing it back out.
        if main_spirv_opt_device:
            main_spirv_opt_device.name = "host_preprocessor"
        settings_util.write(settings, settings_path)
def main_helper(  # pylint: disable=too-many-locals,too-many-branches,too-many-statements;
    tests_dir: Path,
    work_dir: Path,
    binaries: binaries_util.BinaryManager,
    settings: Settings,
    active_devices: List[Device],
    results_out_handle: Optional[TextIO],
    updated_settings_output_path: Optional[Path],
) -> None:

    util.mkdirs_p(work_dir)

    def write_entry(entry: str) -> None:
        if not results_out_handle:
            return
        results_out_handle.write(entry)
        results_out_handle.write(", ")
        results_out_handle.flush()

    def write_newline() -> None:
        if not results_out_handle:
            return
        results_out_handle.write("\n")
        results_out_handle.flush()

    spirv_opt_path: Optional[Path] = None
    swift_shader_path: Optional[Path] = None
    amber_path: Optional[Path] = None

    # Small hack to ensure we have three devices for spirv-opt, each with a different name.
    main_spirv_opt_device: Optional[Device] = None
    if active_devices and active_devices[0].name == "host_preprocessor":
        main_spirv_opt_device = active_devices[0]
        main_spirv_opt_device.name = SPIRV_OPT_O

        spirv_opt_custom = Device()
        spirv_opt_custom.CopyFrom(main_spirv_opt_device)
        spirv_opt_custom.name = SPIRV_OPT_CUSTOM
        active_devices.insert(1, spirv_opt_custom)

        spirv_opt_os = Device()
        spirv_opt_os.CopyFrom(main_spirv_opt_device)
        spirv_opt_os.name = SPIRV_OPT_OS
        active_devices.insert(1, spirv_opt_os)

    # Enumerate active devices, writing their name and storing binary paths if needed.
    write_entry("test")
    for device in active_devices:
        write_entry(device.name)

        if device.HasField("preprocess"):
            spirv_opt_path = binaries.get_binary_path_by_name(
                binaries_util.SPIRV_OPT_NAME
            ).path

        if device.HasField("swift_shader"):
            swift_shader_path = binaries.get_binary_path_by_name(
                binaries_util.SWIFT_SHADER_NAME
            ).path

        if device.HasField("swift_shader") or device.HasField("host"):
            amber_path = binaries.get_binary_path_by_name(binaries_util.AMBER_NAME).path

    write_newline()

    # Enumerate tests and devices, writing the results.
    for test in sorted(tests_dir.glob("*.amber")):
        test_name = util.remove_end(test.name, ".amber")
        write_entry(test_name)
        spirv_shaders = sorted(
            tests_dir.glob(util.remove_end(test.name, "amber") + "*.spv")
        )
        for device in active_devices:
            test_run_dir = work_dir / f"{test_name}_{device.name}"
            util.mkdirs_p(test_run_dir)

            ignored_signatures_set: Set[str] = set(device.ignored_crash_signatures)

            with util.file_open_text(test_run_dir / "log.txt", "w") as log_stream:
                try:
                    gflogging.push_stream_for_logging(log_stream)
                    if device.HasField("preprocess"):
                        # This just means spirv-opt for now.
                        assert spirv_opt_path  # noqa
                        assert main_spirv_opt_device  # noqa

                        # Pick spirv-opt arguments based on the device name.
                        if device.name == SPIRV_OPT_O:
                            spirv_opt_args = ["-O"]
                        elif device.name == SPIRV_OPT_OS:
                            spirv_opt_args = ["-Os"]
                        elif device.name == SPIRV_OPT_CUSTOM:
                            spirv_opt_args = (
                                spirv_opt_util.OPT_INTERESTING_SUBSET_OF_PASSES
                            )
                        else:
                            raise AssertionError(
                                f"Can't tell how to run device {device.name}; "
                                f"must be named host_preprocessor and be the first "
                                f"active device."
                            )

                        # Reset device and ignored_crash_signatures.
                        device = main_spirv_opt_device
                        ignored_signatures_set = set(device.ignored_crash_signatures)

                        try:
                            for spirv_shader in spirv_shaders:
                                spirv_opt_util.run_spirv_opt_on_spirv_shader(
                                    spirv_shader,
                                    test_run_dir,
                                    spirv_opt_args,
                                    spirv_opt_path,
                                )
                            result_util.write_status(test_run_dir, fuzz.STATUS_SUCCESS)
                        except subprocess.CalledProcessError:
                            result_util.write_status(
                                test_run_dir, fuzz.STATUS_TOOL_CRASH
                            )
                        except subprocess.TimeoutExpired:
                            result_util.write_status(
                                test_run_dir, fuzz.STATUS_TOOL_TIMEOUT
                            )
                    elif device.HasField("shader_compiler"):
                        try:
                            for spirv_shader in spirv_shaders:
                                shader_compiler_util.run_shader(
                                    shader_compiler_device=device.shader_compiler,
                                    shader_path=spirv_shader,
                                    output_dir=test_run_dir,
                                    compiler_path=binaries.get_binary_path_by_name(
                                        device.shader_compiler.binary
                                    ).path,
                                    timeout=DEFAULT_TIMEOUT,
                                )
                            result_util.write_status(test_run_dir, fuzz.STATUS_SUCCESS)
                        except subprocess.CalledProcessError:
                            result_util.write_status(test_run_dir, fuzz.STATUS_CRASH)
                        except subprocess.TimeoutExpired:
                            result_util.write_status(test_run_dir, fuzz.STATUS_TIMEOUT)
                    elif device.HasField("swift_shader"):
                        assert swift_shader_path  # noqa
                        assert amber_path  # noqa
                        host_device_util.run_amber(
                            test,
                            test_run_dir,
                            amber_path=amber_path,
                            dump_image=False,
                            dump_buffer=False,
                            icd=swift_shader_path,
                        )
                    elif device.HasField("host"):
                        assert amber_path  # noqa
                        host_device_util.run_amber(
                            test,
                            test_run_dir,
                            amber_path=amber_path,
                            dump_image=False,
                            dump_buffer=False,
                            custom_launcher=list(device.host.custom_launcher),
                        )
                    elif device.HasField("android"):
                        android_device.run_amber_on_device(
                            test,
                            test_run_dir,
                            dump_image=False,
                            dump_buffer=False,
                            serial=device.android.serial,
                        )
                    else:
                        raise AssertionError(f"Unsupported device {device.name}")
                finally:
                    gflogging.pop_stream_for_logging()

            status = result_util.get_status(test_run_dir)
            if status == fuzz.STATUS_SUCCESS:
                write_entry("P")
            elif status in (fuzz.STATUS_TIMEOUT, fuzz.STATUS_TOOL_TIMEOUT):
                write_entry("T")
            else:
                write_entry("F")

            # Update ignored signatures.
            if (
                status
                in (
                    fuzz.STATUS_TOOL_CRASH,
                    fuzz.STATUS_CRASH,
                    fuzz.STATUS_UNRESPONSIVE,
                )
                and updated_settings_output_path
            ):
                log_contents = util.file_read_text(
                    result_util.get_log_path(test_run_dir)
                )
                signature = signature_util.get_signature_from_log_contents(log_contents)
                if signature == signature_util.NO_SIGNATURE:
                    log(f"NOT updating ignored signatures to include {signature}")
                elif signature in ignored_signatures_set:
                    log(f"Signature is already ignored: {signature}")
                else:
                    log(f"Adding ignored signature: {signature}")
                    device.ignored_crash_signatures.append(signature)

        write_newline()

    if updated_settings_output_path:
        # Reset main_spirv_opt_device name before writing it back out.
        if main_spirv_opt_device:
            main_spirv_opt_device.name = "host_preprocessor"
        settings_util.write(settings, updated_settings_output_path)
def artifact_execute_recipe(  # pylint: disable=too-many-branches;
    artifact_path: str = "",
    only_if_artifact_json_missing: bool = False,
    built_in_recipes: Optional[RecipeMap] = None,
) -> None:
    artifact_path = artifact_path_absolute(artifact_path)

    executing_lock_file_path = artifact_get_inner_file_path(
        ARTIFACT_EXECUTING_LOCK_FILE_NAME, artifact_path
    )

    busy_waiting = False
    first_wait = True

    # We may have to retry if another process appears to be executing this recipe.
    while True:
        if busy_waiting:
            time.sleep(BUSY_WAIT_IN_SECONDS)
            if first_wait:
                log(
                    f"Waiting for {artifact_path} due to lock file "
                    f"{executing_lock_file_path}"
                )
                first_wait = False

        if executing_lock_file_path.exists():
            # Retry.
            busy_waiting = True
            continue

        # Several processes can still execute here concurrently; the above check is just
        # an optimization.

        # The metadata file should be written atomically once the artifact is ready for
        # use, so if it exists, we can just return.
        if (
            only_if_artifact_json_missing
            and artifact_get_metadata_file_path(artifact_path).exists()
        ):
            return

        # The recipe file should be written atomically. If it exists, we are fine to
        # continue. If not and if we have the recipe in |built_in_recipes|, more than one
        # process might write it, but the final rename from TEMP_FILE -> RECIPE.json is
        # atomic, so *some* process will succeed and the contents will be valid. Thus, we
        # should be fine to continue once we have written the recipe.
        if not artifact_get_recipe_file_path(artifact_path).exists() and built_in_recipes:
            built_in_recipe = built_in_recipes[artifact_path]
            if not built_in_recipe:
                raise FileNotFoundError(str(artifact_get_recipe_file_path(artifact_path)))
            # This is atomic; should not fail.
            artifact_write_recipe(built_in_recipe, artifact_path)

        recipe = artifact_read_recipe(artifact_path)

        # Create the EXECUTING_LOCK file. The "x" means exclusive creation. This will fail
        # if the file already exists; i.e. another process won the race and is executing
        # the recipe; if so, we retry from the beginning of this function (and will return
        # early). Otherwise, we can continue. We don't need to keep the file open; the
        # file is not opened with exclusive access, just created exclusively.
        try:
            with util.file_open_text(executing_lock_file_path, "x") as lock_file:
                lock_file.write("locked")
        except FileExistsError:
            # Retry.
            busy_waiting = True
            continue

        # If we fail here (e.g. KeyboardInterrupt), we won't remove the lock file. But any
        # alternative will either have the same problem (interrupts can happen at almost
        # any time) or could end up accidentally removing the lock file made by another
        # process, so this is the safest approach. Users can manually delete lock files if
        # needed; the log output indicates the file on which we are blocked.
        try:
            with util.file_open_text(
                artifact_get_recipe_log_file_path(artifact_path), "w"
            ) as f:
                gflogging.push_stream_for_logging(f)
                try:
                    if recipe.HasField("download_and_extract_archive_set"):
                        recipe_download_and_extract_archive_set.recipe_download_and_extract_archive_set(
                            recipe.download_and_extract_archive_set, artifact_path
                        )
                    else:
                        raise NotImplementedError(
                            "Artifact {} has recipe type {} and this is not implemented".format(
                                artifact_path, recipe.WhichOneof("recipe")
                            )
                        )
                finally:
                    gflogging.pop_stream_for_logging()
        finally:
            # Delete the lock file when we have finished. Ignore errors.
            try:
                executing_lock_file_path.unlink()
            except OSError:
                log(f"WARNING: failed to delete: {str(executing_lock_file_path)}")

        # The recipe has been executed; exit the retry loop. (Without this, the
        # |while True| loop would re-execute the recipe.)
        return
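# Minimal standalone sketch of the lock pattern used above: exclusive creation ("x")
# is atomic, so exactly one process wins the race; the others catch FileExistsError,
# busy-wait, and typically return early once the metadata file appears.
#
#   try:
#       with util.file_open_text(lock_path, "x") as lock_file:
#           lock_file.write("locked")
#   except FileExistsError:
#       ...  # another process is executing the recipe; retry later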
def main() -> None:  # pylint: disable=too-many-locals,too-many-branches,too-many-statements;
    parser = argparse.ArgumentParser(
        description="Runs GraphicsFuzz AmberScript tests on the active devices listed in "
        "the settings.json file.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    parser.add_argument(
        "--settings",
        help="Path to the settings JSON file for this instance.",
        default=str(settings_util.DEFAULT_SETTINGS_FILE_PATH),
    )

    parser.add_argument(
        "--tests",
        help="Path to the directory of AmberScript tests with shaders extracted.",
        default="graphicsfuzz",
    )

    parser.add_argument(
        "--update_ignored_crash_signatures",
        help="As the tests are run for each device, add any crash signatures to the "
        "device's ignored_crash_signatures property and write out the updated "
        "settings.json file.",
        action="store_true",
    )

    parser.add_argument(
        "--results_out",
        help="Output file path for the CSV results table.",
        default="results.csv",
    )

    parsed_args = parser.parse_args(sys.argv[1:])

    # Args.
    tests_dir: Path = Path(parsed_args.tests)
    settings_path: Path = Path(parsed_args.settings)
    update_ignored_crash_signatures: bool = parsed_args.update_ignored_crash_signatures
    results_out_path: Path = Path(parsed_args.results_out)

    # Settings and devices.
    settings = settings_util.read_or_create(settings_path)
    active_devices = devices_util.get_active_devices(settings.device_list)

    # Binaries.
    binaries = binaries_util.get_default_binary_manager(settings=settings)

    work_dir = Path() / "temp" / f"graphicsfuzz_cts_run_{fuzz.get_random_name()[:8]}"

    with util.file_open_text(results_out_path, "w") as results_out_handle:
        main_helper(
            tests_dir=tests_dir,
            work_dir=work_dir,
            binaries=binaries,
            settings=settings,
            active_devices=active_devices,
            results_out_handle=results_out_handle,
            updated_settings_output_path=(
                settings_path if update_ignored_crash_signatures else None
            ),
        )