def test_build_report_files_tuple(self, mock_result_files_dict) -> None:
    """Test if the mappings from commit hash to successful and failed report
    files are correctly returned as tuple."""
    mock_result_files: tp.Dict[ShortCommitHash,
                               tp.List[Path]] = defaultdict(list)
    mock_result_files[ShortCommitHash("5e8fe1616d")
                     ] = [self.br_paths_list[3]]
    mock_result_files[ShortCommitHash("e64923e69e")
                     ] = [self.br_paths_list[4]]
    mock_result_files_dict.return_value = mock_result_files

    report_files_tuple = build_report_files_tuple(
        self.case_study.project_name, self.case_study
    )

    successful_revisions: tp.Dict[ShortCommitHash, Path] = {
        ShortCommitHash('5e8fe1616d'): self.br_paths_list[3],
        ShortCommitHash('e64923e69e'): self.br_paths_list[4],
    }
    failed_revisions: tp.Dict[ShortCommitHash, tp.List[Path]] = {}

    self.assertEqual(
        report_files_tuple, (successful_revisions, failed_revisions)
    )

def test_get_successor_report_file(self, mock_result_files_dict) -> None:
    """Test if the correct succeeding report file of a report is found."""
    mock_result_files = defaultdict(list)
    mock_result_files["5e8fe1616d"] = [self.br_paths_list[3]]
    mock_result_files["e64923e69e"] = [self.br_paths_list[4]]
    mock_result_files_dict.return_value = mock_result_files

    report_files, _ = build_report_files_tuple(
        self.case_study.project_name, self.case_study
    )
    sampled_revs = get_processed_revisions(
        self.case_study.project_name, BlameReport
    )
    short_time_id_cache: tp.Dict[ShortCommitHash, int] = {
        rev: self.commit_map.short_time_id(rev) for rev in sampled_revs
    }

    successor_of_e6 = get_successor_report_file(
        ShortCommitHash("e64923e69e"), self.commit_map, short_time_id_cache,
        report_files, sampled_revs
    )
    successor_of_5e = get_successor_report_file(
        ShortCommitHash("5e8fe1616d"), self.commit_map, short_time_id_cache,
        report_files, sampled_revs
    )

    self.assertEqual(successor_of_e6, self.br_paths_list[3])
    self.assertEqual(successor_of_5e, None)

def test_short_status_color(
    self, mock_get_tagged_experiment_specific_revisions
) -> None:
    """
    Check if the case study can show a short status.

    Currently this only checks if the output is correctly generated but not
    if the colors are present.
    """
    # Revision not in set
    mock_get_tagged_experiment_specific_revisions.return_value = [
        (ShortCommitHash('42b25e7f15'), FileStatusExtension.SUCCESS)
    ]
    status = PCM.get_short_status(self.case_study, MockExperiment, 5, True)
    self.assertEqual(status, 'CS: gzip_1: ( 0/10) processed [0/0/0/0/10/0]')
    mock_get_tagged_experiment_specific_revisions.assert_called()

    mock_get_tagged_experiment_specific_revisions.reset_mock()
    mock_get_tagged_experiment_specific_revisions.return_value = [
        (ShortCommitHash('b8b25e7f15'), FileStatusExtension.SUCCESS)
    ]
    status = PCM.get_short_status(self.case_study, MockExperiment, 5, True)
    self.assertEqual(status, 'CS: gzip_1: ( 1/10) processed [1/0/0/0/9/0]')
    mock_get_tagged_experiment_specific_revisions.assert_called()

def test_short_status(
    self, mock_get_tagged_experiment_specific_revisions
) -> None:
    """Check if the case study can show a short status."""
    # Block a revision
    mocked_gzip_source = block_revisions([SingleRevision("7620b81735")])(
        DummyGit(remote="/dev/null", local="/dev/null")
    )
    self.project_source_mock.return_value = mocked_gzip_source

    # Revision not in set
    mock_get_tagged_experiment_specific_revisions.return_value = [
        (ShortCommitHash('42b25e7f15'), FileStatusExtension.SUCCESS)
    ]
    status = PCM.get_short_status(self.case_study, MockExperiment, 5)
    self.assertEqual(status, 'CS: gzip_1: ( 0/10) processed [0/0/0/0/9/1]')
    mock_get_tagged_experiment_specific_revisions.assert_called()

    mock_get_tagged_experiment_specific_revisions.reset_mock()
    mock_get_tagged_experiment_specific_revisions.return_value = [
        (ShortCommitHash('b8b25e7f15'), FileStatusExtension.SUCCESS)
    ]
    status = PCM.get_short_status(self.case_study, MockExperiment, 5)
    self.assertEqual(status, 'CS: gzip_1: ( 1/10) processed [1/0/0/0/8/1]')
    mock_get_tagged_experiment_specific_revisions.assert_called()

def test_get_commit(self):
    """Check if the correct commit hash is returned."""
    self.assertEqual(
        ReportFilename(self.success_filename).commit_hash,
        ShortCommitHash("7bb9ef5f8c")
    )
    self.assertEqual(
        ReportFilename(self.fail_filename).commit_hash,
        ShortCommitHash("7bb9ef5f8c")
    )

def generate_get_tagged_revisions_output(
) -> tp.List[tp.Tuple[ShortCommitHash, FileStatusExtension]]:
    """Generate get_tagged_revisions output for mocking."""
    return [(ShortCommitHash('rev1000000'), FileStatusExtension.SUCCESS),
            (ShortCommitHash('rev2000000'), FileStatusExtension.BLOCKED),
            (ShortCommitHash('rev3000000'), FileStatusExtension.COMPILE_ERROR),
            (ShortCommitHash('rev4000000'), FileStatusExtension.FAILED),
            (ShortCommitHash('rev5000000'), FileStatusExtension.MISSING)]

def test_get_commit(self) -> None:
    """Check if the correct commit hash is returned."""
    self.assertEqual(
        self.commit_report_success.filename.commit_hash,
        ShortCommitHash("7bb9ef5f8c")
    )
    self.assertEqual(
        self.commit_report_fail.filename.commit_hash,
        ShortCommitHash("7bb9ef5f8c")
    )

def test_short_time_id(self) -> None:
    """Test short time id lookup."""
    self.assertEqual(
        self.cmap.short_time_id(ShortCommitHash("ae332f2a5d")), 1
    )
    self.assertEqual(
        self.cmap.short_time_id(ShortCommitHash("ef58a957a6c1")), 0
    )
    self.assertEqual(
        self.cmap.short_time_id(ShortCommitHash("20540be618")), 32
    )

def __split_commit_data(self, commit: pygit2.Commit, column: int) -> tp.Any:
    """Return the data of the given commit for the given table column."""
    if column == 0:
        return ShortCommitHash(commit.hex).hash
    if column == 1:
        return commit.author.name
    if column == 2:
        tzinfo = timezone(timedelta(minutes=commit.author.offset))
        date = datetime.fromtimestamp(float(commit.author.time), tzinfo)
        return QDateTime(date)
    if column == 3:
        return self._cmap.short_time_id(ShortCommitHash(commit.hex))

def test_get_all_revisions_between_short(self):
    """Check if all revisions between two commits are correctly found."""
    repo_path = get_local_project_git_path("brotli")
    revs = get_all_revisions_between(
        '5692e422da6af1e991f9182345d58df87866bc5e',
        '2f9277ff2f2d0b4113b1ffd9753cc0f6973d354a', ShortCommitHash, repo_path
    )
    self.assertSetEqual(
        set(revs), {
            ShortCommitHash("5692e422da"),
            ShortCommitHash("2f9277ff2f"),
            ShortCommitHash("63be8a9940"),
            ShortCommitHash("2a51a85aa8")
        }
    )

def compile(self) -> None:
    """Compile the project."""
    x264_version_source = local.path(self.source_of_primary)
    x264_version = ShortCommitHash(self.version_of_primary)

    fpic_revisions = get_all_revisions_between(
        "5dc0aae2f900064d1f58579929a2285ab289a436",
        "290de9638e5364c37316010ac648a6c959f6dd26", ShortCommitHash,
        x264_version_source
    )
    ldflags_revisions = get_all_revisions_between(
        "6490f4398d9e28e65d7517849e729e14eede8c5b",
        "275ef5332dffec445a0c5a78dbc00c3e0766011d", ShortCommitHash,
        x264_version_source
    )

    if x264_version in fpic_revisions:
        self.cflags += ["-fPIC"]

    clang = bb.compiler.cc(self)
    with local.cwd(x264_version_source):
        with local.env(CC=str(clang)):
            configure_flags = ["--disable-asm"]
            if x264_version in ldflags_revisions:
                configure_flags.append("--extra-ldflags=\"-static\"")
            bb.watch(local["./configure"])(configure_flags)
        bb.watch(make)("-j", get_number_of_jobs(bb_cfg()))

        verify_binaries(self)

def __compile_make(self) -> None:
    libssh_source = local.path(self.source_of(self.primary_source))
    libssh_version = ShortCommitHash(self.version_of_primary)

    autoconf_revisions = get_all_revisions_between(
        "5e02c25291d594e01a910fce097a3fc5084fd68f",
        "21e639cc3fd54eb3d59568744c9627beb26e07ed", ShortCommitHash,
        libssh_source
    )
    autogen_revisions = get_all_revisions_between(
        "ca32b0aa146b31d7772f27d16098845e615432aa",
        "ee54acb417c5589a8dc9dab0676f34b3d40a182b", ShortCommitHash,
        libssh_source
    )

    compiler = bb.compiler.cc(self)
    with local.cwd(libssh_source):
        with local.env(CC=str(compiler)):
            if libssh_version in autogen_revisions:
                bb.watch("./autogen.sh")()
            if libssh_version in autoconf_revisions:
                bb.watch("autoreconf")()
            configure = bb.watch(local["./configure"])
            configure()
        bb.watch(make)("-j", get_number_of_jobs(bb_cfg()))

        verify_binaries(self)

def plot(self, view_mode: bool) -> None:
    """Plot the commit interaction graph of a revision as a chord plot."""
    project_name: str = self.plot_kwargs["case_study"].project_name
    revision = get_commit_map(project_name).convert_to_full_or_warn(
        ShortCommitHash(self.plot_kwargs["revision"])
    )

    def create_node_data(
        node: NodeTy, commit: CommitRepoPair, cig: nx.DiGraph
    ) -> ChordPlotNodeInfo:
        del node
        del cig
        return {"info": commit.commit_hash.short_hash, "color": 1}

    def create_edge_data(
        source_commit: CommitRepoPair, sink_commit: CommitRepoPair,
        amount: int
    ) -> ChordPlotEdgeInfo:
        return {
            "size": amount,
            "color": 1,
            "info":
                f"{source_commit.commit_hash.short_hash} "
                f"--{{{amount}}}--> "
                f"{sink_commit.commit_hash.short_hash}"
        }

    nodes, edges = _prepare_cig_plotly(
        project_name, revision, create_node_data, create_edge_data
    )
    figure = make_chord_plot(nodes, edges, "Commit Interaction Graph")

    if view_mode:
        figure.show()
    else:
        offply.plot(figure, filename=self.plot_file_name("html"))

def compile(self) -> None:
    """Compile the project."""
    brotli_version_source = local.path(self.source_of_primary)
    brotli_git_path = get_local_project_git_path(self.NAME)
    brotli_version = ShortCommitHash(self.version_of_primary)

    with local.cwd(brotli_git_path):
        configure_revisions = get_all_revisions_between(
            "f9ab24a7aaee93d5932ba212e5e3d32e4306f748",
            "5814438791fb2d4394b46e5682a96b68cd092803", ShortCommitHash
        )
        simple_make_revisions = get_all_revisions_between(
            "e1739826c04a9944672b99b98249dda021bdeb36",
            "378485b097fd7b80a5e404a3cb912f7b18f78cdb", ShortCommitHash
        )
    c_compiler = bb.compiler.cc(self)

    if brotli_version in simple_make_revisions:
        with local.cwd(brotli_version_source / "tools"):
            bb.watch(make)("-j", get_number_of_jobs(bb_cfg()))
    elif brotli_version in configure_revisions:
        with local.cwd(brotli_version_source):
            with local.env(CC=str(c_compiler)):
                bb.watch(local["./configure"])()
            bb.watch(make)("-j", get_number_of_jobs(bb_cfg()))
    else:
        mkdir(brotli_version_source / "out")
        with local.cwd(brotli_version_source / "out"):
            with local.env(CC=str(c_compiler)):
                bb.watch(local["../configure-cmake"])()
            bb.watch(make)("-j", get_number_of_jobs(bb_cfg()))

    with local.cwd(brotli_version_source):
        verify_binaries(self)

def extend_with_distrib_sampling(
    case_study: CaseStudy, cmap: CommitMap,
    sampling_method: NormalSamplingMethod, merge_stage: int, num_rev: int,
    ignore_blocked: bool, only_code_commits: bool
) -> None:
    """
    Extend a case study by sampling 'num_rev' new revisions, according to
    the distribution of the given 'sampling_method'.

    Args:
        case_study: the case study to extend
        cmap: commit map to map revisions to unique IDs
        sampling_method: distribution to use for sampling
        merge_stage: stage the revisions will be added to
        num_rev: number of revisions to add
        ignore_blocked: ignore blocked revisions
        only_code_commits: exclude commits that do not change code
    """
    is_blocked: tp.Callable[[ShortCommitHash, tp.Type[Project]],
                            bool] = lambda rev, _: False
    if ignore_blocked:
        is_blocked = is_revision_blocked

    is_code_commit: tp.Callable[[ShortCommitHash], bool] = lambda rev: True
    if only_code_commits:
        churn_conf = ChurnConfig.create_c_style_languages_config()
        project_git_path = get_local_project_git_path(case_study.project_name)

        def is_c_cpp_code_commit(commit: ShortCommitHash) -> bool:
            return contains_source_code(commit, project_git_path, churn_conf)

        is_code_commit = is_c_cpp_code_commit

    # Needs to be sorted so the probability distribution over the length
    # of the list is the same as the distribution over the commits' age history
    project_cls = get_project_cls_by_name(case_study.project_name)
    revision_list = [
        (FullCommitHash(rev), idx)
        for rev, idx in sorted(list(cmap.mapping_items()), key=lambda x: x[1])
        if not case_study.has_revision_in_stage(
            ShortCommitHash(rev), merge_stage
        ) and not is_blocked(ShortCommitHash(rev), project_cls) and
        is_code_commit(ShortCommitHash(rev))
    ]

    case_study.include_revisions(
        sampling_method.sample_n(revision_list, num_rev), merge_stage
    )

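# Usage sketch (illustrative only, not part of the original module):
# `my_case_study`, `my_cmap`, and `UniformSamplingMethod` are assumed names
# standing in for a loaded case study, its commit map, and some
# NormalSamplingMethod subclass.
#
#   extend_with_distrib_sampling(
#       my_case_study,            # CaseStudy to grow
#       my_cmap,                  # CommitMap of the same project
#       UniformSamplingMethod(),  # sampling distribution (assumed class)
#       merge_stage=0,            # stage that receives the new revisions
#       num_rev=10,               # how many revisions to sample
#       ignore_blocked=True,      # skip revisions blocked by the project
#       only_code_commits=False,  # also allow non-code commits
#   )
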
def test_accessors(self):
    """Tests if the different accessor functions work."""
    self.assertEqual(
        self.report_filename.commit_hash, ShortCommitHash("7bb9ef5f8c")
    )
    self.assertEqual(self.report_filename.report_shorthand, "CR")
    self.assertEqual(self.report_filename.experiment_shorthand, "CRE")
    self.assertEqual(
        self.report_filename.file_status, FileStatusExtension.SUCCESS
    )

def test_contains_source_code_without(self) -> None:
    """Check if we can correctly identify commits without source code."""
    churn_conf = ChurnConfig.create_c_style_languages_config()
    project_git_path = get_local_project_git_path('brotli')

    self.assertFalse(
        contains_source_code(
            ShortCommitHash('f4153a09f87cbb9c826d8fc12c74642bb2d879ea'),
            project_git_path, churn_conf
        )
    )
    self.assertFalse(
        contains_source_code(
            ShortCommitHash('e83c7b8e8fb8b696a1df6866bc46cbb76d7e0348'),
            project_git_path, churn_conf
        )
    )
    self.assertFalse(
        contains_source_code(
            ShortCommitHash('698e3a7f9d3000fa44174f5be415bf713f71bd0e'),
            project_git_path, churn_conf
        )
    )

def test_contains_source_code_with(self) -> None:
    """Check if we can correctly identify commits with source code."""
    churn_conf = ChurnConfig.create_c_style_languages_config()
    project_git_path = get_local_project_git_path('brotli')

    self.assertTrue(
        contains_source_code(
            ShortCommitHash('62662f87cdd96deda90ac817de94e3c4af75226a'),
            project_git_path, churn_conf
        )
    )
    self.assertTrue(
        contains_source_code(
            ShortCommitHash('27dd7265403d8e8fed99a854b9c3e1db7d79525f'),
            project_git_path, churn_conf
        )
    )
    # Merge commit of the previous one
    self.assertTrue(
        contains_source_code(
            ShortCommitHash('4ec67035c0d97c270c1c73038cc66fc5fcdfc120'),
            project_git_path, churn_conf
        )
    )

def test_get_revision_not_in_case_study(self) -> None:
    """Check if we correctly handle the lookup of a revision that is not in
    the case study."""
    vara_cfg()['paper_config']['current_config'] = "test_revision_lookup"
    load_paper_config()

    self.assertRaises(
        ValueError, MCS.get_revision_status_for_case_study,
        get_paper_config().get_case_studies('brotli')[0],
        ShortCommitHash('0000000000'), CR
    )

def test_has_revisions(self) -> None:
    """Check if certain revisions were loaded correctly."""
    self.assertTrue(
        self.case_study.has_revision(
            FullCommitHash("b8b25e7f1593f6dcc20660ff9fb1ed59ede15b7a")
        )
    )
    self.assertTrue(
        self.case_study.has_revision(ShortCommitHash("b8b25e7f15"))
    )
    self.assertTrue(
        self.case_study.has_revision(ShortCommitHash("a3db5806d01"))
    )
    self.assertFalse(
        self.case_study.has_revision(
            FullCommitHash("42b25e7f1593f6dcc20660ff9fb1ed59ede15b7a")
        )
    )

def test_valid_binary_lookup(self) -> None:
    """Check if we can correctly determine the list of valid binaries for a
    specified revision."""
    self.rv_map.specify_binary(
        "build/bin/SingleLocalSimple", BinaryType.EXECUTABLE
    )
    self.rv_map.specify_binary(
        "build/bin/SingleLocalMultipleRegions",
        BinaryType.EXECUTABLE,
        only_valid_in=RevisionRange("162db88346", "master")
    )

    test_query = self.rv_map[ShortCommitHash("162db88346")]
    self.assertSetEqual({x.name for x in test_query},
                        {"SingleLocalSimple", "SingleLocalMultipleRegions"})

    test_query = self.rv_map[ShortCommitHash("745424e3ae")]
    self.assertSetEqual({x.name for x in test_query}, {"SingleLocalSimple"})

def _gen_overview_data(tag_blocked: bool,
                       **kwargs: tp.Any) -> tp.Dict[str, tp.List[int]]:
    case_study: CaseStudy = kwargs["case_study"]
    project_name = case_study.project_name
    commit_map: CommitMap = get_commit_map(project_name)
    project = get_project_cls_by_name(project_name)

    if 'report_type' in kwargs:
        result_file_type: tp.Type[BaseReport] = kwargs['report_type']
    else:
        result_file_type = EmptyReport

    positions: tp.Dict[str, tp.List[int]] = {
        "background": [],
        "blocked": [],
        "blocked_all": [],
        "compile_error": [],
        "failed": [],
        "missing": [],
        "success": []
    }

    for c_hash, index in commit_map.mapping_items():
        if not case_study.has_revision(ShortCommitHash(c_hash)):
            positions["background"].append(index)
            if hasattr(project, "is_blocked_revision"
                      ) and project.is_blocked_revision(c_hash)[0]:
                positions["blocked_all"].append(index)

    revisions = FileStatusDatabase.get_data_for_project(
        project_name, ["revision", "time_id", "file_status"],
        commit_map,
        case_study,
        result_file_type=result_file_type,
        tag_blocked=tag_blocked
    )

    positions["success"] = (
        revisions[revisions["file_status"] ==
                  FileStatusExtension.SUCCESS.get_status_extension()]
    )["time_id"].tolist()
    positions["failed"] = (
        revisions[revisions["file_status"] ==
                  FileStatusExtension.FAILED.get_status_extension()]
    )["time_id"].tolist()
    positions["blocked"] = (
        revisions[revisions["file_status"] ==
                  FileStatusExtension.BLOCKED.get_status_extension()]
    )["time_id"].tolist()
    positions["blocked_all"].extend((
        revisions[revisions["file_status"] ==
                  FileStatusExtension.BLOCKED.get_status_extension()]
    )["time_id"].tolist())
    positions["missing"] = (
        revisions[revisions["file_status"] ==
                  FileStatusExtension.MISSING.get_status_extension()]
    )["time_id"].tolist()
    positions["compile_error"] = (
        revisions[revisions["file_status"] ==
                  FileStatusExtension.COMPILE_ERROR.get_status_extension()]
    )["time_id"].tolist()

    return positions

def test_get_revisions_in_case_study(self) -> None:
    """Check if we correctly handle the lookup of a revision that is in a
    case study."""
    vara_cfg()['paper_config']['current_config'] = "test_revision_lookup"
    load_paper_config()

    self.assertEqual(
        MCS.get_revision_status_for_case_study(
            get_paper_config().get_case_studies('brotli')[0],
            ShortCommitHash('21ac39f7c8'), CR
        ), FileStatusExtension.SUCCESS
    )

def commit_hash(self) -> ShortCommitHash:
    """
    Commit hash of the result file.

    Returns:
        the commit hash from a result file name
    """
    match = ReportFilename.__RESULT_FILE_REGEX.search(self.filename)
    if match:
        return ShortCommitHash(match.group("file_commit_hash"))

    raise ValueError(f'File name {self.filename} was wrongly formatted.')

def compile(self) -> None:
    """Compile the project."""
    libssh_source = local.path(self.source_of(self.primary_source))
    libssh_version = ShortCommitHash(self.version_of_primary)

    cmake_revisions = get_all_revisions_between(
        "0151b6e17041c56813c882a3de6330c82acc8d93", "master", ShortCommitHash,
        libssh_source
    )
    if libssh_version in cmake_revisions:
        self.__compile_cmake()
    else:
        self.__compile_make()

def test_specification_binaries_with_special_entry_point(self) -> None:
    """Check if we can add binaries that have a special entry point."""
    self.rv_map.specify_binary(
        "build/bin/SingleLocalSimple",
        BinaryType.EXECUTABLE,
        override_entry_point="build/bin/OtherSLSEntry"
    )

    test_query = self.rv_map[ShortCommitHash("745424e3ae")]
    self.assertEqual(
        "build/bin/OtherSLSEntry", str(test_query[0].entry_point)
    )
    self.assertIsInstance(test_query[0].entry_point, Path)

def test_status_color(
    self, mock_get_tagged_experiment_specific_revisions
) -> None:
    """
    Check if the case study can show its status.

    Currently this only checks if the output is correctly generated but not
    if the colors are present.
    """
    # Revision not in set
    mock_get_tagged_experiment_specific_revisions.return_value = [
        (ShortCommitHash('42b25e7f15'), FileStatusExtension.SUCCESS)
    ]
    status = PCM.get_status(
        self.case_study, MockExperiment, 5, False, False, True
    )
    self.assertEqual(
        status, """CS: gzip_1: ( 0/10) processed [0/0/0/0/10/0]
    b8b25e7f15 [Missing]
    7620b81735 [Missing]
    622e9b1d02 [Missing]
    8798d5c4fd [Missing]
    2e654f9963 [Missing]
    edfad78619 [Missing]
    a3db5806d0 [Missing]
    e75f428c0d [Missing]
    1e7e3769dc [Missing]
    9872ba420c [Missing]
"""
    )
    mock_get_tagged_experiment_specific_revisions.assert_called()

    mock_get_tagged_experiment_specific_revisions.reset_mock()
    mock_get_tagged_experiment_specific_revisions.side_effect = cycle([[
        (ShortCommitHash('b8b25e7f15'), FileStatusExtension.SUCCESS),
        (ShortCommitHash('a3db5806d0'), FileStatusExtension.SUCCESS),
        (ShortCommitHash('622e9b1d02'), FileStatusExtension.FAILED),
        (ShortCommitHash('1e7e3769dc'), FileStatusExtension.COMPILE_ERROR),
        (ShortCommitHash('2e654f9963'), FileStatusExtension.BLOCKED)
    ], [(ShortCommitHash('b8b25e7f15'), FileStatusExtension.SUCCESS)]])

    status = PCM.get_status(
        self.case_study, MockExperimentMultiReport, 5, False, False, True
    )
    self.assertEqual(
        status, """CS: gzip_1: ( 1/10) processed [1/1/1/1/5/1]
    b8b25e7f15 [Success]
    7620b81735 [Missing]
    622e9b1d02 [Failed]
    8798d5c4fd [Missing]
    2e654f9963 [Blocked]
    edfad78619 [Missing]
    a3db5806d0 [Incomplete]
    e75f428c0d [Missing]
    1e7e3769dc [CompileError]
    9872ba420c [Missing]
"""
    )
    mock_get_tagged_experiment_specific_revisions.assert_called()

def test_total_status_color(
    self, mock_get_tagged_experiment_specific_revisions
) -> None:
    """Check if the total status is correctly generated."""
    total_status_occurrences: tp.DefaultDict[
        FileStatusExtension, tp.Set[ShortCommitHash]] = defaultdict(set)

    # Revision not in set
    mock_get_tagged_experiment_specific_revisions.return_value = [
        (ShortCommitHash('42b25e7f15'), FileStatusExtension.SUCCESS)
    ]
    PCM.get_status(
        self.case_study, MockExperiment, 5, False, False, True,
        total_status_occurrences
    )
    status = PCM.get_total_status(total_status_occurrences, 15, True)
    self.assertEqual(
        status,
        """--------------------------------------------------------------------------------
Total: ( 0/10) processed [0/0/0/0/10/0]"""
    )
    mock_get_tagged_experiment_specific_revisions.assert_called()

    mock_get_tagged_experiment_specific_revisions.reset_mock()
    mock_get_tagged_experiment_specific_revisions.side_effect = cycle([[
        (ShortCommitHash('b8b25e7f15'), FileStatusExtension.SUCCESS),
        (ShortCommitHash('a3db5806d0'), FileStatusExtension.SUCCESS),
        (ShortCommitHash('622e9b1d02'), FileStatusExtension.FAILED),
        (ShortCommitHash('1e7e3769dc'), FileStatusExtension.COMPILE_ERROR),
        (ShortCommitHash('2e654f9963'), FileStatusExtension.BLOCKED)
    ], [(ShortCommitHash('b8b25e7f15'), FileStatusExtension.SUCCESS)]])

    PCM.get_status(
        self.case_study, MockExperimentMultiReport, 5, False, False, True,
        total_status_occurrences
    )
    status = PCM.get_total_status(total_status_occurrences, 15, True)
    self.assertEqual(
        status,
        """--------------------------------------------------------------------------------
Total: ( 1/15) processed [1/1/1/1/10/1]"""
    )
    mock_get_tagged_experiment_specific_revisions.assert_called()

    # Care: The second block is duplicated to check if we prevent
    # adding the same revisions twice
    mock_get_tagged_experiment_specific_revisions.reset_mock()

    PCM.get_status(
        self.case_study, MockExperimentMultiReport, 5, False, False, True,
        total_status_occurrences
    )
    status = PCM.get_total_status(total_status_occurrences, 15, True)
    self.assertEqual(
        status,
        """--------------------------------------------------------------------------------
Total: ( 1/15) processed [1/1/1/1/10/1]"""
    )
    mock_get_tagged_experiment_specific_revisions.assert_called()

def test_gen_filter(self) -> None:
    """Check if the project generates a revision filter."""
    revision_filter = self.case_study.get_revision_filter()
    self.assertTrue(
        revision_filter(
            FullCommitHash("b8b25e7f1593f6dcc20660ff9fb1ed59ede15b7a")
        )
    )
    self.assertTrue(revision_filter(ShortCommitHash("b8b25e7f15")))
    self.assertFalse(
        revision_filter(
            FullCommitHash("42b25e7f1593f6dcc20660ff9fb1ed59ede15b7a")
        )
    )

def create_default_error_handler(
    experiment_handle: 'ExperimentHandle',
    project: Project,
    report_type: tp.Type[BaseReport],
    error_type: FileStatusExtension,
    output_folder: tp.Optional[Path] = None,
    binary: tp.Optional[ProjectBinaryWrapper] = None,
    timeout_duration: tp.Optional[str] = None,
) -> PEErrorHandler:
    """
    Create a default PEErrorHandler based on the `project` and `report_type`.

    Args:
        experiment_handle: handle to the current experiment
        project: the project currently under analysis
        report_type: the report type that should be generated
        error_type: a FileStatusExtension describing the problem type
        output_folder: where the errors will be placed
        binary: if only a specific binary is handled
        timeout_duration: set timeout

    Returns:
        an initialized PEErrorHandler
    """
    error_output_folder = output_folder if output_folder else Path(
        f"{bb_cfg()['varats']['outfile']}/{project.name}"
    )

    return PEErrorHandler(
        error_output_folder,
        str(
            experiment_handle.get_file_name(
                report_type.shorthand(),
                project_name=str(project.name),
                binary_name=binary.name if binary else "all",
                project_revision=ShortCommitHash(project.version_of_primary),
                project_uuid=str(project.run_uuid),
                extension_type=error_type
            )
        ),
        timeout_duration=timeout_duration
    )

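# Usage sketch (illustrative only, not part of the original module): the
# `experiment_handle` and `project` objects, the CommitReport choice, and the
# "2h" timeout string are assumptions for demonstration purposes.
#
#   handler = create_default_error_handler(
#       experiment_handle,                   # handle of the running experiment
#       project,                             # project under analysis
#       CommitReport,                        # report type the experiment emits
#       FileStatusExtension.COMPILE_ERROR,   # status to tag on failure
#       timeout_duration="2h",               # optional watchdog timeout
#   )
#   # The handler can then be attached to build or analysis actions so that
#   # failures produce an error report instead of aborting the experiment.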