def analyze(self) -> actions.StepResult:
    """Run phasar's IDELinearConstantAnalysis analysis."""
    if not self.obj:
        return actions.StepResult.ERROR
    project = self.obj

    # The result folder returned here already encodes the project name,
    # keeping runs of different projects separated.
    varats_result_folder = get_varats_result_folder(project)

    phasar = local["phasar-llvm"]

    for binary in project.binaries:
        bc_file = get_cached_bc_file_path(project, binary)

        # Success filename for this binary's run.
        result_file = create_new_success_result_filename(
            self.__experiment_handle, EmptyReport, project, binary
        )

        # Run the LCA analysis on the cached bitcode with an unlimited
        # stack, redirecting phasar's output into the result file.
        run_cmd = wrap_unlimit_stack_size(
            phasar["-m", bc_file, "-C", "CHA", "-D", "ide-lca"]
        )
        run_cmd = (run_cmd > f'{varats_result_folder}/{result_file}')

        exec_func_with_pe_error_handler(
            run_cmd,
            create_default_analysis_failure_handler(
                self.__experiment_handle, project, EmptyReport,
                Path(varats_result_folder)
            )
        )

    return actions.StepResult.OK
def analyze(self) -> actions.StepResult:
    """Run VaRA-IPP utility pass and extract instrumentation point information."""
    project = self.obj
    vara_result_folder = get_varats_result_folder(project)
    for binary in project.binaries:
        # Success filename for this binary's report.
        result_file = create_new_success_result_filename(
            self.__experiment_handle, VaraIPPReport, project, binary)
        # Need the following passes:
        # - a feature-region generation pass (NOTE(review): the flag below
        #   spells it "--vara-PTFDD" while the original comment said
        #   "vara-PFTDD" — one of the two is likely a typo; confirm the
        #   correct pass name against VaRA)
        # - vara-IPP (Instrumentation Point Printer)
        opt_params = [
            "--vara-PTFDD", "-vara-IPP", "-o", "/dev/null",
            # Bitcode must have been compiled with debug info and the
            # feature extension for these passes to work.
            get_cached_bc_file_path(
                project, binary,
                [BCFileExtensions.DEBUG, BCFileExtensions.FEATURE])
        ]
        # Store the collected information in report (opt's stdout is
        # redirected into the result file).
        run_cmd = opt[opt_params] > str(
            vara_result_folder / str(result_file))
        exec_func_with_pe_error_handler(
            run_cmd,
            create_default_analysis_failure_handler(
                self.__experiment_handle, project, VaraIPPReport,
                Path(vara_result_folder)))
    return actions.StepResult.OK
def analyze(self) -> actions.StepResult:
    """This step performs the actual analysis with the correct flags."""
    project = self.obj

    # Define the output directory.
    vara_result_folder = get_varats_result_folder(project)

    for binary in project.binaries:
        # Define empty success file.
        result_file = create_new_success_result_filename(
            self.__experiment_handle, FAR, project, binary)

        # Combine the input bitcode file's name.
        bc_target_file = get_cached_bc_file_path(
            project, binary, self.__bc_file_extensions)

        opt_params = [
            "-vara-PFA",
            "-S",
            "-vara-FAR",
            f"-vara-report-outfile={vara_result_folder}/{result_file}",
            str(bc_target_file),
        ]

        run_cmd = opt[opt_params]
        run_cmd = wrap_unlimit_stack_size(run_cmd)

        # Run the command with custom error handler and timeout.
        exec_func_with_pe_error_handler(
            run_cmd,
            create_default_analysis_failure_handler(
                self.__experiment_handle, project, FAR,
                Path(vara_result_folder)))

    # Bug fix: the original fell off the end and implicitly returned
    # None, violating the declared actions.StepResult return type.
    # Report success explicitly, as every sibling step does.
    return actions.StepResult.OK
def run_perf_tracing(self) -> actions.StepResult:
    """
    Execute the specified binaries of the project, in specific
    configurations, against one or multiple workloads.

    The traced binary writes its trace events to the file named by the
    VARA_TRACE_FILE environment variable, which is pointed at the
    per-binary success result file.
    """
    project: Project = self.obj
    print(f"PWD {os.getcwd()}")
    vara_result_folder = get_varats_result_folder(project)

    for binary in project.binaries:
        # Only executables can be run and traced.
        if binary.type != BinaryType.EXECUTABLE:
            continue

        result_file = create_new_success_result_filename(
            self.__experiment_handle, TEFReport, project, binary)

        with local.cwd(local.path(project.source_of_primary)):
            # Bug fix: typo "Currenlty" in the log message.
            print(f"Currently at {local.path(project.source_of_primary)}")
            print(f"Bin path {binary.path}")

            with local.env(
                VARA_TRACE_FILE=f"{vara_result_folder}/{result_file}"
            ):
                # NOTE(review): hard-coded workload — assumes this file
                # exists on the machine running the experiment.
                workload = "/tmp/countries-land-1km.geo.json"
                # TODO: figure out how to handle workloads
                binary("-k", workload)
                # TODO: figure out how to handle different configs

    return actions.StepResult.OK
def analyze(self) -> actions.StepResult:
    """
    This step performs the actual analysis with the correct flags.

    Flags:
        -vara-CR: to run a commit flow report
        -vara-report-outfile=<path>: specify the path to store the results
    """
    # Hoisted out of the loop: the import and the timeout value are
    # loop-invariant (the original re-ran both every iteration).
    from benchbuild.utils.cmd import timeout  # pylint: disable=C0415

    if not self.obj:
        return actions.StepResult.ERROR
    project = self.obj

    # Pick the interaction filter of the configured experiment, falling
    # back to the CommitReportExperiment filter when none is configured.
    if self.__interaction_filter_experiment_name is None:
        interaction_filter_file = Path(
            self.INTERACTION_FILTER_TEMPLATE.format(
                experiment="CommitReportExperiment",
                project=str(project.name)))
    else:
        interaction_filter_file = Path(
            self.INTERACTION_FILTER_TEMPLATE.format(
                experiment=self.__interaction_filter_experiment_name,
                project=str(project.name)))

    if not interaction_filter_file.is_file():
        raise Exception("Could not load interaction filter file \"" +
                        str(interaction_filter_file) + "\"")

    # Add to the user-defined path for saving the results of the
    # analysis also the name and the unique id of the project of every
    # run.
    vara_result_folder = get_varats_result_folder(project)

    timeout_duration = '8h'

    for binary in project.binaries:
        result_file = create_new_success_result_filename(
            self.__experiment_handle, CR, project, binary)

        opt_params = [
            "-vara-BD", "-vara-CR", "-vara-init-commits",
            f"-vara-report-outfile={vara_result_folder}/{result_file}",
            # Dead-branch removal: the is_file() re-check always held
            # because we raised above when the file was missing.
            f"-vara-cf-interaction-filter="
            f"{str(interaction_filter_file)}",
            str(get_cached_bc_file_path(project, binary)),
        ]

        run_cmd = opt[opt_params]

        exec_func_with_pe_error_handler(
            timeout[timeout_duration, run_cmd],
            create_default_analysis_failure_handler(
                self.__experiment_handle, project, CR,
                Path(vara_result_folder),
                timeout_duration=timeout_duration))

    return actions.StepResult.OK
def test_result_folder_creation(self):
    """Checks if we get the correct result folder back."""
    cwd = str(os.getcwd())
    bb_cfg()["varats"]["outfile"] = cwd + "/results"

    folder = EU.get_varats_result_folder(BBTestProject())

    # The folder must live under the configured outfile path, be named
    # after the project, and be created on disk.
    expected = cwd + "/results/" + BBTestProject.NAME
    self.assertEqual(expected, str(folder))
    self.assertTrue(folder.exists())
def analyze(self) -> actions.StepResult:
    """
    This step performs the actual analysis with the correct command line
    flags.

    Flags used:
        * -vara-BD: activates Blame Detection
        * -vara-init-commits: lets the Blame Detection initialize
          Commits for Repos
        * -vara-verify-blameMD: activate BlameMDVerifier
        * -vara-verifier-options=: chooses between multiple print options
            * Status: prints if the module as a whole passed or failed
    """
    if not self.obj:
        return actions.StepResult.ERROR
    project = self.obj

    # Results and failure logs end up in the per-project result folder.
    vara_result_folder = get_varats_result_folder(project)
    timeout_duration = '8h'

    for binary in project.binaries:
        bc_target_file = get_cached_bc_file_path(
            project, binary, self.bc_file_extensions)

        main_report = self.__experiment_handle.report_spec().main_report

        # Success and failure filenames for this binary.
        result_file = create_new_success_result_filename(
            self.__experiment_handle, main_report, project, binary)
        error_file = create_new_failed_result_filename(
            self.__experiment_handle, main_report, project, binary)

        # Verifier invocation on the cached bitcode; the report itself
        # goes to stdout (redirected below), so opt's output is dropped.
        vara_run_cmd = opt["-vara-BD", "-vara-init-commits",
                           "-vara-verify-blameMD",
                           "-vara-verifier-options=All",
                           str(bc_target_file), "-o", "/dev/null"]

        exec_func_with_pe_error_handler(
            timeout[timeout_duration, vara_run_cmd] >
            f"{vara_result_folder}/{result_file}",
            PEErrorHandler(vara_result_folder, error_file.filename,
                           timeout_duration))

    return actions.StepResult.OK
def analyze(self) -> actions.StepResult:
    """
    This step performs the actual analysis with the correct command line
    flags.

    Flags used:
        * -vara-BR: to run a commit flow report
        * -yaml-report-outfile=<path>: specify the path to store the results
    """
    if not self.obj:
        return actions.StepResult.ERROR
    project = self.obj

    # Add to the user-defined path for saving the results of the
    # analysis also the name and the unique id of the project of every
    # run.
    vara_result_folder = get_varats_result_folder(project)

    for binary in project.binaries:
        result_file = create_new_success_result_filename(
            self.__experiment_handle, BR, project, binary)

        # Bitcode must carry the blame metadata extensions.
        bc_file = get_cached_bc_file_path(
            project, binary, [
                BCFileExtensions.NO_OPT, BCFileExtensions.TBAA,
                BCFileExtensions.BLAME
            ])

        run_cmd = wrap_unlimit_stack_size(opt[
            "-vara-BD", "-vara-BR", "-vara-init-commits",
            "-vara-use-phasar",
            f"-vara-blame-taint-scope={self.__blame_taint_scope.name}",
            f"-vara-report-outfile={vara_result_folder}/{result_file}",
            bc_file])

        exec_func_with_pe_error_handler(
            run_cmd,
            create_default_analysis_failure_handler(
                self.__experiment_handle, project, BR,
                Path(vara_result_folder)))

    return actions.StepResult.OK
def analyze(self) -> actions.StepResult:
    """This step performs the actual comparision, running the analysis
    with and without phasars global support."""
    if not self.obj:
        return actions.StepResult.ERROR
    project = self.obj

    # Add to the user-defined path for saving the results of the
    # analysis also the name and the unique id of the project of every
    # run.
    vara_result_folder = get_varats_result_folder(project)

    # The report type mirrors whether globals support is switched on;
    # it is the same for every binary, so select it once.
    report_type: tp.Union[tp.Type[GlobalsReportWith],
                          tp.Type[GlobalsReportWithout]]
    if self.__globals_active:
        report_type = GlobalsReportWith
    else:
        report_type = GlobalsReportWithout

    for binary in project.binaries:
        result_file = create_new_success_result_filename(
            self.__experiment_handle, report_type, project, binary)

        bc_file = get_cached_bc_file_path(
            project, binary,
            [BCFileExtensions.NO_OPT, BCFileExtensions.TBAA])

        run_cmd = wrap_unlimit_stack_size(phasar_globals[
            f"--auto-globals={'ON' if self.__globals_active else 'OFF'}",
            "-m", bc_file,
            "-o", f"{vara_result_folder}/{result_file}"])

        exec_func_with_pe_error_handler(
            run_cmd,
            create_default_analysis_failure_handler(
                self.__experiment_handle, project, report_type,
                Path(vara_result_folder)))

    return actions.StepResult.OK
def analyze(self) -> actions.StepResult:
    """Only create a report file."""
    if not self.obj:
        return actions.StepResult.ERROR
    project = self.obj

    vara_result_folder = get_varats_result_folder(project)

    for binary in project.binaries:
        # An empty success file is all this experiment produces.
        result_file = create_new_success_result_filename(
            self.__experiment_handle, EmptyReport, project, binary)

        exec_func_with_pe_error_handler(
            touch[f"{vara_result_folder}/{result_file}"],
            create_default_analysis_failure_handler(
                self.__experiment_handle, project, EmptyReport,
                Path(vara_result_folder)))

    return actions.StepResult.OK
def run_szz(self) -> actions.StepResult:
    """Prepare data needed for running SZZUnleashed."""
    project: Project = self.obj
    run_dir = Path(project.source_of_primary).parent
    szzunleashed_jar = (
        SZZUnleashed.install_location() / SZZUnleashed.get_jar_name())
    varats_result_folder = get_varats_result_folder(project)

    # SZZUnleashed is started from the directory that holds the issue
    # list and the repository checkout.
    with local.cwd(run_dir):
        run_cmd = java[
            "-jar", str(szzunleashed_jar),
            "-d", "1",
            "-i", str(run_dir / "issue_list.json"),
            "-r", project.source_of_primary]
        exec_func_with_pe_error_handler(
            run_cmd,
            create_default_analysis_failure_handler(
                self.__experiment_handle, project, SZZUnleashedReport,
                Path(varats_result_folder)))

    return actions.StepResult.OK
def create_report(self) -> actions.StepResult:
    """Create a report from SZZUnleashed data."""
    project = self.obj
    varats_result_folder = get_varats_result_folder(project)
    run_dir = Path(project.source_of_primary).parent

    result_path = run_dir / "results" / "fix_and_introducers_pairs.json"
    with result_path.open("r") as result_json:
        szz_result = json.load(result_json)

    # entries are lists of the form [<fix>, <introducing>]; group the
    # introducing commits under their fixing commit.
    bugs: tp.Dict[str, tp.Set[str]] = {}
    for pair in szz_result:
        bugs.setdefault(pair[0], set()).add(pair[1])

    raw_szz_report = {
        "szz_tool": SZZTool.SZZ_UNLEASHED.tool_name,
        "bugs": {k: sorted(list(v)) for k, v in bugs.items()}
    }

    result_file = SZZUnleashedReport.get_file_name(
        "SZZUnleashed",
        project_name=str(project.name),
        binary_name="none",  # we don't rely on binaries in this experiment
        project_revision=project.version_of_primary,
        project_uuid=str(project.run_uuid),
        extension_type=FSE.SUCCESS)

    # Serialize as a multi-document YAML file: version header first,
    # then the report payload.
    with open(f"{varats_result_folder}/{result_file}", "w") as yaml_file:
        yaml_file.write(
            yaml.dump_all([
                VersionHeader.from_version_number("SZZReport",
                                                  1).get_dict(),
                raw_szz_report
            ],
                          explicit_start=True,
                          explicit_end=True))

    return actions.StepResult.OK
def create_report(self) -> actions.StepResult:
    """Create a report from SZZ data."""
    project = self.obj

    bug_provider = BugProvider.get_provider_for_project(project)
    pygit_bugs = bug_provider.find_pygit_bugs()

    varats_result_folder = get_varats_result_folder(project)

    # entries are lists of the form [<fix>, <introducing>]: map each
    # fixing commit hash to the sorted introducing commit hashes.
    bugs: tp.Dict[str, tp.List[str]] = {}
    for bug in pygit_bugs:
        bugs[str(bug.fixing_commit.id)] = sorted(
            str(commit.id) for commit in bug.introducing_commits)

    raw_szz_report = {
        "szz_tool": SZZTool.PYDRILLER_SZZ.tool_name,
        "bugs": bugs
    }

    result_file = PyDrillerSZZReport.get_file_name(
        "PyDrSZZ",
        project_name=str(project.name),
        binary_name="none",  # we don't rely on binaries in this experiment
        project_revision=project.version_of_primary,
        project_uuid=str(project.run_uuid),
        extension_type=FSE.SUCCESS)

    # Serialize as a multi-document YAML file: version header first,
    # then the report payload.
    with open(f"{varats_result_folder}/{result_file}", "w") as yaml_file:
        yaml_file.write(
            yaml.dump_all([
                VersionHeader.from_version_number("SZZReport",
                                                  1).get_dict(),
                raw_szz_report
            ],
                          explicit_start=True,
                          explicit_end=True))

    return actions.StepResult.OK
def run(self) -> actions.StepResult:
    """Capture instrumentation stats by running the binary with a workload
    and attaching the UsdtExecutionStats.bt."""
    project: Project = self.obj
    vara_result_folder = get_varats_result_folder(project)
    binary: ProjectBinaryWrapper
    for binary in project.binaries:
        # Only executables can be run and traced.
        if binary.type != BinaryType.EXECUTABLE:
            continue
        # Get workload to use.
        # TODO (se-sic/VaRA#841): refactor to bb workloads if possible
        workload_provider = WorkloadProvider.create_provider_for_project(
            project
        )
        if not workload_provider:
            # Without a provider no binary of this project can be run;
            # abort the whole step (not just this binary).
            print(
                f"No workload provider for project={project.name}. " \
                "Skipping."
            )
            return actions.StepResult.CAN_CONTINUE
        workload = workload_provider.get_workload_for_binary(binary.name)
        if workload is None:
            # Other binaries may still have workloads; skip only this one.
            print(
                f"No workload for project={project.name} " \
                f"binary={binary.name}. Skipping."
            )
            continue
        # Assemble Path for report.
        report_file_name = create_new_success_result_filename(
            self.__experiment_handle, VaraInstrumentationStatsReport,
            project, binary
        )
        report_file = Path(vara_result_folder, str(report_file_name))
        # Execute binary.
        with local.cwd(project.source_of_primary):
            run_cmd = binary[workload]
            # attach bpftrace to binary to allow tracing it via USDT
            bpftrace_script = Path(
                VaRA.install_location(),
                "share/vara/perf_bpf_tracing/UsdtExecutionStats.bt"
            )
            # Assertion: Can be run without sudo password prompt. To
            # guarantee this, add an entry to /etc/sudoers.
            bpftrace_cmd = bpftrace["-o", report_file, bpftrace_script,
                                    binary.path]
            bpftrace_runner: Future
            # Start bpftrace in the background (as root) BEFORE the
            # workload runs, so no early events are missed.
            with local.as_root():
                bpftrace_runner = bpftrace_cmd & BG
            sleep(3)  # give bpftrace time to start up
            # Run the workload in the foreground until it finishes.
            run_cmd & FG  # pylint: disable=W0104
            # Wait for bpftrace running in background to exit.
            bpftrace_runner.wait()
    return actions.StepResult.OK